prompt (stringlengths 19 to 1.03M) | completion (stringlengths 4 to 2.12k) | api (stringlengths 8 to 90) |
---|---|---|
from urllib.request import urlopen
from http.cookiejar import CookieJar
from io import StringIO
from app.extensions import cache
from app.api.constants import PERMIT_HOLDER_CACHE, DORMANT_WELLS_CACHE, LIABILITY_PER_WELL_CACHE, TIMEOUT_15_MINUTES, TIMEOUT_60_MINUTES, TIMEOUT_12_HOURS, TIMEOUT_1_YEAR
from flask import Flask, current_app
from threading import Thread
import requests
import urllib
import pandas as pd
import pyarrow as pa
import time
from .ogc_data_constants import PERMIT_HOLDER_CSV_DATA, DORMANT_WELLS_CSV_DATA, LIABILITY_PER_WELL_CSV_DATA
# TODO: Stick into environment variables
PERMIT_HOLDER_CSV = 'http://reports.bcogc.ca/ogc/f?p=200:201:14073940726161:CSV::::'
DORMANT_WELLS_CSV = 'https://reports.bcogc.ca/ogc/f?p=200:81:9680316354055:CSV::::'
LIABILITY_PER_WELL_CSV = 'https://reports.bcogc.ca/ogc/f?p=200:10:10256707131131:CSV::::'
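# One possible way to address the TODO above (a sketch, not the original code):
# read each URL from the environment, falling back to the hard-coded defaults.
# The environment variable names used here are assumptions, not defined elsewhere.
import os
PERMIT_HOLDER_CSV = os.environ.get('PERMIT_HOLDER_CSV', PERMIT_HOLDER_CSV)
DORMANT_WELLS_CSV = os.environ.get('DORMANT_WELLS_CSV', DORMANT_WELLS_CSV)
LIABILITY_PER_WELL_CSV = os.environ.get('LIABILITY_PER_WELL_CSV', LIABILITY_PER_WELL_CSV)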
session = requests.session()
def refreshOGCdata(app, cache_key, csv_url, process):
with app.app_context():
serializer = pa.default_serialization_context()
data = cache.get(cache_key)
expiry_token = cache.get(cache_key + '_EXPIRY_TOKEN')
if not expiry_token:
current_app.logger.debug(f'OGC DATA SERVICE - {cache_key} - Cached data not found.')
# set 15 minute token to mitigate multiple threads requesting data at the same time
cache.set(cache_key + '_EXPIRY_TOKEN', True, timeout=TIMEOUT_15_MINUTES)
else:
current_app.logger.debug(f'OGC DATA SERVICE - {cache_key} - Cached data up to date.')
return
updated_from_web = False
try:
cookieProcessor = urllib.request.HTTPCookieProcessor()
opener = urllib.request.build_opener(cookieProcessor)
response = session.get(csv_url)
df = pd.read_table(StringIO(response.text), sep=",")
df = process(df)
updated_from_web = True
current_app.logger.debug(
f'OGC DATA SERVICE - {cache_key} - Successful get from OGC reporting.')
except Exception:
# on error, if we don't have data in the cache initialize it from static content
if not data:
current_app.logger.debug(
f'OGC DATA SERVICE - {cache_key} - Falling back to static content.')
if cache_key == PERMIT_HOLDER_CACHE:
df = pd.read_table(StringIO(PERMIT_HOLDER_CSV_DATA), sep=",")
if cache_key == DORMANT_WELLS_CACHE:
df = pd.read_table(StringIO(DORMANT_WELLS_CSV_DATA), sep=",")
if cache_key == LIABILITY_PER_WELL_CACHE:
df = pd.read_table(StringIO(LIABILITY_PER_WELL_CSV_DATA), sep=",")
df = process(df)
row_count = df.shape[0]
# only update cache if there is a good dataset
if row_count > 1:
current_app.logger.debug(f'OGC DATA SERVICE - {cache_key} - Updating cached data.')
cache.set(
cache_key,
serializer.serialize(df).to_buffer().to_pybytes(),
timeout=TIMEOUT_1_YEAR)
if updated_from_web:
cache.set(cache_key + '_EXPIRY_TOKEN', True, timeout=TIMEOUT_60_MINUTES)
else:
current_app.logger.warning(
f'OGC DATA SERVICE - {cache_key} - FAILED TO RETRIEVE UPDATED DATA')
class OGCDataService():
@classmethod
def refreshAllData(cls):
cls.getPermitHoldersDataFrame()
cls.getDormantWellsDataFrame()
cls.getLiabilityPerWellDataFrame()
@classmethod
def getOGCdataframe(cls, cache_key, csv_url, process):
serializer = pa.default_serialization_context()
data = cache.get(cache_key)
app = current_app._get_current_object()
#if empty dataset refresh data synchronously, otherwise refresh in the background and continue
if not data:
df = refreshOGCdata(app, cache_key, csv_url, process)
else:
thread = Thread(
target=refreshOGCdata, args=(
app,
cache_key,
csv_url,
process,
))
thread.daemon = True
thread.start()
#update data and return
data = cache.get(cache_key)
if data:
df = serializer.deserialize(data)
return df
@classmethod
def getPermitHoldersDataFrame(cls):
def process(df):
df.columns = [
'operator_id', 'organization_name', 'phone_num', 'address_line_1', 'address_line_2',
'city', 'province', 'postal_code', 'country'
]
return df
return cls.getOGCdataframe(PERMIT_HOLDER_CACHE, PERMIT_HOLDER_CSV, process)
@classmethod
def getDormantWellsDataFrame(cls):
def process(df):
df.columns = [
'operator_name', 'operator_id', 'well_auth_number', 'well_name', 'dormant_status',
'current_status', 'well_dormancy_date', 'site_dormancy_date', 'site_dormancy_type',
'site_dormant_status', 'surface_location', 'field', 'abandonment_date',
'last_spud_date', 'last_rig_rels_date', 'last_completion_date',
'last_active_production_year', 'last_active_inj_display_year',
'wellsite_dormancy_declaration_date', 'multi_well'
]
df['well_dormancy_date'] = pd.to_datetime(
df['well_dormancy_date'],
errors='coerce').apply(lambda x: x.strftime('%Y-%m-%d') if pd.notnull(x) else None)
df['site_dormancy_date'] = pd.to_datetime(
df['site_dormancy_date'],
errors='coerce').apply(lambda x: x.strftime('%Y-%m-%d') if pd.notnull(x) else None)
df['abandonment_date'] = pd.to_datetime(
df['abandonment_date'],
errors='coerce').apply(lambda x: x.strftime('%Y-%m-%d') if pd.notnull(x) else None)
df['last_spud_date'] = pd.to_datetime(
df['last_spud_date'],
errors='coerce').apply(lambda x: x.strftime('%Y-%m-%d') if pd.notnull(x) else None)
df['last_rig_rels_date'] = pd.to_datetime(
df['last_rig_rels_date'],
errors='coerce').apply(lambda x: x.strftime('%Y-%m-%d') if pd.notnull(x) else None)
df['last_completion_date'] = pd.to_datetime(
df['last_completion_date'],
errors='coerce').apply(lambda x: x.strftime('%Y-%m-%d') if pd.notnull(x) else None)
df['last_active_production_year'] = pd.to_datetime(
df['last_active_production_year'],
errors='coerce').apply(lambda x: x.strftime('%Y-%m-%d') if pd.notnull(x) else None)
df['last_active_inj_display_year'] = pd.to_datetime(
df['last_active_inj_display_year'],
errors='coerce').apply(lambda x: x.strftime('%Y-%m-%d') if pd.notnull(x) else None)
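# A self-contained sketch (not part of the service above) of the cache round-trip it
# relies on. Note: pa.default_serialization_context() is deprecated in newer pyarrow
# releases and removed in recent ones, so this assumes an older pyarrow version.
def _example_cache_round_trip():
    example_df = pd.DataFrame({'operator_id': [1], 'organization_name': ['Acme']})
    context = pa.default_serialization_context()
    payload = context.serialize(example_df).to_buffer().to_pybytes()  # bytes stored via cache.set()
    return context.deserialize(payload)  # DataFrame recovered as done after cache.get()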
#!/usr/bin/env python
# coding: utf-8
# # N-BEATS
#
# ### Uses the N-BEATS model to predict future values.
# In[1]:
import os, sys
from tqdm import tqdm
from subseasonal_toolkit.utils.notebook_util import isnotebook
if isnotebook():
# Autoreload packages that are modified
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
else:
from argparse import ArgumentParser
import pandas as pd
import numpy as np
from scipy.spatial.distance import cdist, euclidean
from datetime import datetime, timedelta
from ttictoc import tic, toc
from subseasonal_data.utils import get_measurement_variable
from subseasonal_toolkit.utils.general_util import printf
from subseasonal_toolkit.utils.experiments_util import get_id_name, get_th_name, get_first_year, get_start_delta
from subseasonal_toolkit.utils.models_util import (get_submodel_name, start_logger, log_params, get_forecast_filename,
save_forecasts)
from subseasonal_toolkit.utils.eval_util import get_target_dates, mean_rmse_to_score, save_metric
from sklearn.linear_model import *
from subseasonal_data import data_loaders
# In[ ]:
#
# Specify model parameters
#
if not isnotebook():
# If run as a script (not as a notebook), parse command-line arguments
parser = ArgumentParser()
parser.add_argument("pos_vars",nargs="*") # gt_id and horizon
parser.add_argument('--target_dates', '-t', default="std_test")
args, opt = parser.parse_known_args()
# Assign variables
gt_id = get_id_name(args.pos_vars[0]) # "contest_precip" or "contest_tmp2m"
horizon = get_th_name(args.pos_vars[1]) # "12w", "34w", or "56w"
target_dates = args.target_dates
else:
# Otherwise, specify arguments interactively
gt_id = "contest_tmp2m"
horizon = "34w"
target_dates = "std_contest"
#
# Process model parameters
#
# One can subtract this number from a target date to find the last viable training date.
start_delta = timedelta(days=get_start_delta(horizon, gt_id))
# Record model and submodel name
model_name = "nbeats"
submodel_name = get_submodel_name(model_name)
FIRST_SAVE_YEAR = 2007 # Don't save forecasts from years prior to FIRST_SAVE_YEAR
if not isnotebook():
# Save output to log file
logger = start_logger(model=model_name,submodel=submodel_name,gt_id=gt_id,
horizon=horizon,target_dates=target_dates)
# Store parameter values in log
params_names = ['gt_id', 'horizon', 'target_dates']
params_values = [eval(param) for param in params_names]
log_params(params_names, params_values)
# In[ ]:
printf('Loading target variable and dropping extraneous columns')
tic()
var = get_measurement_variable(gt_id)
gt = data_loaders.get_ground_truth(gt_id).loc[:,["start_date","lat","lon",var]]
toc()
# In[ ]:
printf('Pivoting dataframe to have one column per lat-lon pair and one row per start_date')
tic()
gt = gt.set_index(['lat','lon','start_date']).squeeze().unstack(['lat','lon'])
toc()
# In[ ]:
#
# Make predictions for each target date
#
from fbprophet import Prophet
from pandas.tseries.offsets import DateOffset
def get_first_fourth_month(date):
targets = {(1, 31), (3,31), (5, 31), (7, 31), (9, 30), (11,30)}
while (date.month, date.day) not in targets:
date = date - DateOffset(days=1)
return date
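# Illustrative usage of the helper above (the date is made up): it walks backwards
# one day at a time until it hits one of the (month, day) pairs listed in `targets`.
def _example_first_fourth_month():
    # 2011-04-15 lies between Mar 31 and May 31, so Mar 31 is returned.
    return get_first_fourth_month(pd.Timestamp("2011-04-15"))  # -> Timestamp('2011-03-31 00:00:00')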
from dateutil.relativedelta import relativedelta
def get_predictions(date):
# take the most recent month-end in `targets` (Jan 31, Mar 31, May 31, Jul 31, Sep 30 or Nov 30) on or before the date.
true_date = get_first_fourth_month(date)
if horizon == "34w":
prediction_lag = 28
if horizon == "56w":
prediction_lag = 42
shifts = np.flip(np.arange(0, 13)) * 4
cmd_prefix = "python main.py --features "
cmd_features = ""
for shift in shifts:
cmd_features = cmd_features + f"{gt_id}_shift{shift} "
a = len(cmd_features)
cmd_features += f'\\'
b = len(cmd_features)
epochs = 20
cnt = 0
target_date = (true_date - timedelta(days=prediction_lag) - relativedelta(years=1)).strftime("%Y-%m-%d")
val_begin_date = (true_date - timedelta(days=prediction_lag) - relativedelta(years=1) + timedelta(days=1)).strftime("%Y-%m-%d")
val_end_date = (true_date - timedelta(days=prediction_lag)).strftime("%Y-%m-%d")
test_begin_date = true_date.strftime("%Y-%m-%d")
test_end_date = (true_date + relativedelta(months=2) -timedelta(days=1)).strftime("%Y-%m-%d")
log_params(["target_date"], [target_date])
converted_date = date.strftime('%Y_%m_%d')
cmd_suffix = f"--task '{gt_id}_{horizon}' --train --evaluate --path-to-feather-file 'data/gt-{gt_id}-14d.h5' --begin-date 1979-01-01 \ --target-date {target_date} \ --val-begin-date {val_begin_date} \ --val-end-date {val_end_date} \ --test-begin-date {test_begin_date} --test-end-date {test_end_date} \ --batch-size 512 --max_epochs {epochs} --grid-count -1 --gpus 1 --gpu-idx {cnt} --full \ --default_root_dir {gt_id}_{horizon}_exp_{converted_date} "
cmd = cmd_prefix + cmd_features + cmd_suffix
log_params(["cmd"], [cmd])
print(cmd)
os.system(cmd) # comment to not run the actual program.
# open the file where this is outputted.
date_string = date.strftime("%Y%m%d")
filename = f"nbeats-predictions/{gt_id}_{horizon}-{date_string}.h5"
# return the answer.
return pd.read_hdf(filename).values
tic()
target_date_objs = pd.Series(get_target_dates(date_str=target_dates,horizon=horizon))
rmses = pd.Series(index=target_date_objs, dtype=np.float64)
from datetime import datetime
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas import DataFrame, DatetimeIndex, Index, MultiIndex, Series
import pandas._testing as tm
from pandas.core.window.common import flex_binary_moment
def _rolling_consistency_cases():
for window in [1, 2, 3, 10, 20]:
for min_periods in {0, 1, 2, 3, 4, window}:
if min_periods and (min_periods > window):
continue
for center in [False, True]:
yield window, min_periods, center
# binary moments
def test_rolling_cov(series):
A = series
B = A + np.random.randn(len(A))
result = A.rolling(window=50, min_periods=25).cov(B)
tm.assert_almost_equal(result[-1], np.cov(A[-50:], B[-50:])[0, 1])
def test_rolling_corr(series):
A = series
B = A + np.random.randn(len(A))
result = A.rolling(window=50, min_periods=25).corr(B)
tm.assert_almost_equal(result[-1], np.corrcoef(A[-50:], B[-50:])[0, 1])
# test for correct bias correction
a = tm.makeTimeSeries()
b = tm.makeTimeSeries()
a[:5] = np.nan
b[:10] = np.nan
result = a.rolling(window=len(a), min_periods=1).corr(b)
tm.assert_almost_equal(result[-1], a.corr(b))
@pytest.mark.parametrize("func", ["cov", "corr"])
def test_rolling_pairwise_cov_corr(func, frame):
result = getattr(frame.rolling(window=10, min_periods=5), func)()
result = result.loc[(slice(None), 1), 5]
result.index = result.index.droplevel(1)
expected = getattr(frame[1].rolling(window=10, min_periods=5), func)(frame[5])
tm.assert_series_equal(result, expected, check_names=False)
@pytest.mark.parametrize("method", ["corr", "cov"])
def test_flex_binary_frame(method, frame):
series = frame[1]
res = getattr(series.rolling(window=10), method)(frame)
res2 = getattr(frame.rolling(window=10), method)(series)
exp = frame.apply(lambda x: getattr(series.rolling(window=10), method)(x))
tm.assert_frame_equal(res, exp)
tm.assert_frame_equal(res2, exp)
frame2 = frame.copy()
frame2.values[:] = np.random.randn(*frame2.shape)
res3 = getattr(frame.rolling(window=10), method)(frame2)
exp = DataFrame(
{k: getattr(frame[k].rolling(window=10), method)(frame2[k]) for k in frame}
)
tm.assert_frame_equal(res3, exp)
@pytest.mark.parametrize(
"window,min_periods,center", list(_rolling_consistency_cases())
)
@pytest.mark.parametrize("f", [lambda v: Series(v).sum(), np.nansum])
def test_rolling_apply_consistency_sum_nans(
consistency_data, window, min_periods, center, f
):
x, is_constant, no_nans = consistency_data
if f is np.nansum and min_periods == 0:
pass
else:
rolling_f_result = x.rolling(
window=window, min_periods=min_periods, center=center
).sum()
rolling_apply_f_result = x.rolling(
window=window, min_periods=min_periods, center=center
).apply(func=f, raw=True)
tm.assert_equal(rolling_f_result, rolling_apply_f_result)
@pytest.mark.parametrize(
"window,min_periods,center", list(_rolling_consistency_cases())
)
@pytest.mark.parametrize("f", [lambda v: Series(v).sum(), np.nansum, np.sum])
def test_rolling_apply_consistency_sum_no_nans(
consistency_data, window, min_periods, center, f
):
x, is_constant, no_nans = consistency_data
if no_nans:
if f is np.nansum and min_periods == 0:
pass
else:
rolling_f_result = x.rolling(
window=window, min_periods=min_periods, center=center
).sum()
rolling_apply_f_result = x.rolling(
window=window, min_periods=min_periods, center=center
).apply(func=f, raw=True)
tm.assert_equal(rolling_f_result, rolling_apply_f_result)
@pytest.mark.parametrize("window", range(7))
def test_rolling_corr_with_zero_variance(window):
# GH 18430
s = Series(np.zeros(20))
other = Series(np.arange(20))
assert s.rolling(window=window).corr(other=other).isna().all()
def test_flex_binary_moment():
# GH3155
# don't blow the stack
msg = "arguments to moment function must be of type np.ndarray/Series/DataFrame"
with pytest.raises(TypeError, match=msg):
flex_binary_moment(5, 6, None)
def test_corr_sanity():
# GH 3155
df = DataFrame(
np.array(
[
[0.87024726, 0.18505595],
[0.64355431, 0.3091617],
[0.92372966, 0.50552513],
[0.00203756, 0.04520709],
[0.84780328, 0.33394331],
[0.78369152, 0.63919667],
]
)
)
res = df[0].rolling(5, center=True).corr(df[1])
assert all(np.abs(np.nan_to_num(x)) <= 1 for x in res)
df = DataFrame(np.random.rand(30, 2))
res = df[0].rolling(5, center=True).corr(df[1])
assert all(np.abs(np.nan_to_num(x)) <= 1 for x in res)
def test_rolling_cov_diff_length():
# GH 7512
s1 = Series([1, 2, 3], index=[0, 1, 2])
s2 = Series([1, 3], index=[0, 2])
result = s1.rolling(window=3, min_periods=2).cov(s2)
expected = Series([None, None, 2.0])
tm.assert_series_equal(result, expected)
s2a = Series([1, None, 3], index=[0, 1, 2])
result = s1.rolling(window=3, min_periods=2).cov(s2a)
tm.assert_series_equal(result, expected)
def test_rolling_corr_diff_length():
# GH 7512
s1 = Series([1, 2, 3], index=[0, 1, 2])
s2 = Series([1, 3], index=[0, 2])
result = s1.rolling(window=3, min_periods=2).corr(s2)
expected = Series([None, None, 1.0])
tm.assert_series_equal(result, expected)
s2a = Series([1, None, 3], index=[0, 1, 2])
result = s1.rolling(window=3, min_periods=2).corr(s2a)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"f",
[
lambda x: x.rolling(window=10, min_periods=5).cov(x, pairwise=False),
lambda x: x.rolling(window=10, min_periods=5).corr(x, pairwise=False),
lambda x: x.rolling(window=10, min_periods=5).max(),
lambda x: x.rolling(window=10, min_periods=5).min(),
lambda x: x.rolling(window=10, min_periods=5).sum(),
lambda x: x.rolling(window=10, min_periods=5).mean(),
lambda x: x.rolling(window=10, min_periods=5).std(),
lambda x: x.rolling(window=10, min_periods=5).var(),
lambda x: x.rolling(window=10, min_periods=5).skew(),
lambda x: x.rolling(window=10, min_periods=5).kurt(),
lambda x: x.rolling(window=10, min_periods=5).quantile(quantile=0.5),
lambda x: x.rolling(window=10, min_periods=5).median(),
lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=False),
lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=True),
pytest.param(
lambda x: x.rolling(win_type="boxcar", window=10, min_periods=5).mean(),
marks=td.skip_if_no_scipy,
),
],
)
def test_rolling_functions_window_non_shrinkage(f):
# GH 7764
s = Series(range(4))
s_expected = Series(np.nan, index=s.index)
df = DataFrame([[1, 5], [3, 2], [3, 9], [-1, 0]], columns=["A", "B"])
df_expected = DataFrame(np.nan, index=df.index, columns=df.columns)
## SETUP ##
## dependencies
import sys
import pandas as pd
## logging
sys.stdout = open(snakemake.log[0], 'w')
sys.stderr = open(snakemake.log[0], 'w')
## input files
input_dict = {
'taxlist' : snakemake.input['taxlist'],
'slvmap' : snakemake.input['slvmap'],
'dups' : snakemake.input['dups'],
}
## output files
output_dict = {
'kraknames_S' : snakemake.output['kraknames_S'],
'kraknodes_S' : snakemake.output['kraknodes_S'],
'krakseq2tax_S' : snakemake.output['krakseq2tax_S'],
'kraknames_G' : snakemake.output['kraknames_G'],
'kraknodes_G' : snakemake.output['kraknodes_G'],
'krakseq2tax_G' : snakemake.output['krakseq2tax_G'],
'kronataxtab_S' : snakemake.output['kronataxtab_S'],
'kronataxlist_S' : snakemake.output['kronataxlist_S'],
'kronaseq2tax_S' : snakemake.output['kronaseq2tax_S'],
'kronataxtab_G' : snakemake.output['kronataxtab_G'],
'kronataxlist_G' : snakemake.output['kronataxlist_G'],
'kronaseq2tax_G' : snakemake.output['kronaseq2tax_G'],
'qiimetax_S' : snakemake.output['qiimetax_S'],
'qiimetax_G' : snakemake.output['qiimetax_G']
}
# input_dict = {
# 'slvmap' : "METADATA/Reference_Sequences/silva/slvmap.txt",
# 'taxlist' : "METADATA/Reference_Sequences/silva/taxlist.txt",
# 'dups' : "METADATA/Reference_Sequences/silva/reference.dups"
# }
# output_dict = {
# 'kraknames_S' : "METADATA/Reference_Sequences/silva/kraken2/species/taxonomy/names.dmp",
# 'kraknodes_S' : "METADATA/Reference_Sequences/silva/kraken2/species/taxonomy/nodes.dmp",
# 'krakseq2tax_S' : "METADATA/Reference_Sequences/silva/kraken2/species/seqid2taxid.map",
# 'kraknames_G' : "METADATA/Reference_Sequences/silva/kraken2/genus/taxonomy/names.dmp",
# 'kraknodes_G' : "METADATA/Reference_Sequences/silva/kraken2/genus/taxonomy/nodes.dmp",
# 'krakseq2tax_G' : "METADATA/Reference_Sequences/silva/kraken2/genus/seqid2taxid.map",
# 'kronataxtab_S' : "METADATA/Reference_Sequences/silva/krona/species/taxonomy.tab",
# 'kronataxtab_G' : "METADATA/Reference_Sequences/silva/krona/genus/taxonomy.tab",
# 'kronataxlist_S' : "METADATA/Reference_Sequences/silva/krona/species/taxlist.txt",
# 'kronataxlist_G' : "METADATA/Reference_Sequences/silva/krona/genus/taxlist.txt",
# 'kronaseq2tax_S' : "METADATA/Reference_Sequences/silva/krona/species/seqid2taxid.map",
# 'qiimetax_G' : "METADATA/Reference_Sequences/silva/qiime/genus/taxonomy.tsv",
# 'qiimetax_S' : "METADATA/Reference_Sequences/silva/qiime/species/taxonomy.tsv"
# }
# import os
# krak_dir_S = "/".join(output_dict['krakseq2tax_S'].split("/")[:-1])
# krak_dir_G = "/".join(output_dict['krakseq2tax_G'].split("/")[:-1])
# krona_dir_S = "/".join(output_dict['kronataxtab_S'].split("/")[:-1])
# krona_dir_G = "/".join(output_dict['kronataxtab_G'].split("/")[:-1])
# qiime_dir_S = "/".join(output_dict['qiimetax_S'].split("/")[:-1])
# qiime_dir_G = "/".join(output_dict['qiimetax_G'].split("/")[:-1])
# os.makedirs( krak_dir_S + "/taxonomy" )
# os.makedirs( krak_dir_G + "/taxonomy" )
# os.makedirs( krona_dir_S )
# os.makedirs( krona_dir_G )
# os.makedirs( qiime_dir_S )
# os.makedirs( qiime_dir_G )
## LOAD DATA ##
## load taxID list
df_taxlist = pd.read_csv(input_dict['taxlist'], sep='\t',
names=['pathname','taxID','rank','remark','release'],
usecols=['pathname','taxID','rank'])
## load SILVA taxIDs w/o species classification
df_accmap = pd.read_csv(input_dict['slvmap'], sep='\t',
skiprows=1,
names=['accID','start','end','path','name','taxID'],
usecols=['accID','start','end','name','taxID'])
## PER ACCID ##
## the concept of this paragraph was adapted from <NAME>, Little Rock, AR, <EMAIL>
## add path to accmap
df_accmap = pd.merge(df_accmap, df_taxlist.loc[:,['pathname','taxID']], how='left', on='taxID')
# -*- encoding: utf8 -*-
from ..benchmark import *
from .partition_shape_experiment import *
from ..cluster import *
from ..util import *
import lue
# from ..plot.scale.square_root import SquareRootScale
import dateutil.relativedelta
import dateutil.parser
import matplotlib
# matplotlib.use("PDF")
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import matplotlib.scale as mscale
import matplotlib.ticker as ticker
import numpy as np
import pandas as pd
import seaborn as sns
# import tzlocal
import json
import math
import tempfile
# mscale.register_scale(SquareRootScale)
def benchmark_meta_to_lue_json(
benchmark_pathname,
lue_pathname,
cluster,
benchmark,
experiment):
# Read benchmark JSON
benchmark_json = json.loads(open(benchmark_pathname).read())
environment_json = benchmark_json["environment"]
nr_workers = [environment_json["nr_workers"]]
lue_json = {
"dataset": {
"phenomena": [
{
"name": "benchmark",
"collection_property_sets": [
{
"name": "meta_information",
"properties": [
{
"name": "name",
"shape_per_object": "same_shape",
"value_variability": "constant",
"datatype": "string",
"value": [experiment.program_name]
},
{
"name": "system_name",
"shape_per_object": "same_shape",
"value_variability": "constant",
"datatype": "string",
"value": [cluster.name]
},
{
"name": "command",
"shape_per_object": "same_shape",
"value_variability": "constant",
"datatype": "string",
"value": [experiment.command_pathname]
},
{
"name": "kind",
"shape_per_object": "same_shape",
"value_variability": "constant",
"datatype": "string",
"value": [experiment.name]
},
{
"name": "scenario_name",
"shape_per_object": "same_shape",
"value_variability": "constant",
"datatype": "string",
"value": [benchmark.scenario_name]
},
{
"name": "description",
"shape_per_object": "same_shape",
"value_variability": "constant",
"datatype": "string",
"value": [experiment.description]
},
{
"name": "nr_workers",
"shape_per_object": "same_shape",
"value_variability": "constant",
"datatype": "uint64",
"value": nr_workers
},
]
}
]
}
]
}
}
# Write results
open(lue_pathname, "w").write(
json.dumps(lue_json, sort_keys=False, indent=4))
def benchmark_to_lue_json(
benchmark_pathname,
lue_json_pathname,
epoch):
# Read benchmark JSON
benchmark_json = json.loads(open(benchmark_pathname).read())
time_units = benchmark_json["unit"]
benchmark_epoch = dateutil.parser.isoparse(benchmark_json["start"])
# We assume here that benchmarks are located at different time points
# in seconds from each other. If this is not the case, use time_units
# to figure out what units to use instead.
epoch_offset = int((benchmark_epoch - epoch).total_seconds())
if epoch_offset < 0:
raise RuntimeError(
"epoch passed in is later than epoch from benchmark: "
"{} > {}".format(epoch, benchmark_epoch))
# Calculate number of seconds sinds the epoch
time_points = [
dateutil.parser.isoparse(timing["start"])
for timing in benchmark_json["timings"]]
time_points = [
epoch_offset + int((time_point - benchmark_epoch).total_seconds())
for time_point in time_points]
time_points = [time_points[0]]
property_description = "Amount of time a measurement took"
durations = [timing["duration"] for timing in benchmark_json["timings"]]
# Object tracking: a benchmark contains property values (durations)
# for a single object (piece of software being benchmarked). The ID of
# this object is some value, like 5.
# The active set indices are 0, 1, 2, 3, ...
nr_active_sets = len(time_points)
active_set_idx = list(range(nr_active_sets))
active_object_id = nr_active_sets * [5]
array_shape = list(benchmark_json["task"]["array_shape"])
partition_shape = list(benchmark_json["task"]["partition_shape"])
lue_json = {
"dataset": {
"phenomena": [
{
"name": "benchmark",
"property_sets": [
{
"name": "measurement",
"description":
"Information per benchmark measurement",
"object_tracker": {
"active_set_index": active_set_idx,
"active_object_id": active_object_id
},
"time_domain": {
"clock": {
"epoch": {
"kind": "common_era",
"origin": epoch.isoformat(),
"calendar": "gregorian"
},
"unit": time_units,
"tick_period_count": 1
},
"time_point": time_points
},
"properties": [
{
"name": "duration",
"description": property_description,
"shape_per_object": "same_shape",
"value_variability": "variable",
"shape_variability": "constant_shape",
"datatype": "uint64",
"shape": [len(durations)],
"value": durations
},
{
"name": "array_shape",
"shape_per_object": "same_shape",
"value_variability": "variable",
"shape_variability": "constant_shape",
"datatype": "uint64",
"shape": [len(array_shape)],
"value": array_shape
},
{
"name": "partition_shape",
"shape_per_object": "same_shape",
"value_variability": "variable",
"shape_variability": "constant_shape",
"datatype": "uint64",
"shape": [len(partition_shape)],
"value": partition_shape
},
]
}
]
}
]
}
}
# Write results
open(lue_json_pathname, "w").write(
json.dumps(lue_json, sort_keys=False, indent=4))
def determine_epoch(
cluster,
benchmark,
experiment):
array_shapes = experiment.array.shapes
partition_shapes = experiment.partition.shapes
epoch = None
for array_shape in array_shapes:
for partition_shape in partition_shapes:
benchmark_pathname = experiment.benchmark_result_pathname(
cluster.name, benchmark.scenario_name, array_shape,
"x".join([str(extent) for extent in partition_shape]),
"json")
assert os.path.exists(benchmark_pathname), benchmark_pathname
benchmark_json = json.loads(open(benchmark_pathname).read())
benchmark_start = dateutil.parser.isoparse(benchmark_json["start"])
if epoch is None:
epoch = benchmark_start
else:
epoch = epoch if epoch < benchmark_start else benchmark_start
return epoch
def import_raw_results(
cluster,
benchmark,
experiment):
"""
Import all raw benchmark results into a new LUE file
This is a two step process:
1. Translate each raw benchmark result into a LUE JSON file
2. Import all LUE JSON files into a single LUE file
"""
# Each benchmark containing timings has a start location in time and
# an overall duration. The location in time can be used to position
# the benchmark in time. Most likely, all benchmarks are started at
# different locations in time. The duration is not that relevant.
# The timings are represented by a location in time and a
# duration. The location in time is not that relevant. Duration is.
# To position all benchmarks in time, we need a single starting time
# point to use as the clock's epoch and calculate the distance of
# each benchmark's start time point from this epoch.
epoch = determine_epoch(cluster, benchmark, experiment)
lue_dataset_pathname = experiment.result_pathname(
cluster.name, benchmark.scenario_name, "data", "lue")
if os.path.exists(lue_dataset_pathname):
os.remove(lue_dataset_pathname)
array_shapes = list(experiment.array.shapes)
partition_shapes = list(experiment.partition.shapes)
metadata_written = False
for array_shape in array_shapes:
for partition_shape in partition_shapes:
result_pathname = experiment.benchmark_result_pathname(
cluster.name, benchmark.scenario_name, array_shape,
"x".join([str(extent) for extent in partition_shape]),
"json")
assert os.path.exists(result_pathname), result_pathname
if not metadata_written:
with tempfile.NamedTemporaryFile(suffix=".json") as lue_json_file:
benchmark_meta_to_lue_json(
result_pathname, lue_json_file.name, cluster,
benchmark, experiment)
import_lue_json(lue_json_file.name, lue_dataset_pathname)
metadata_written = True
with tempfile.NamedTemporaryFile(suffix=".json") as lue_json_file:
benchmark_to_lue_json(result_pathname, lue_json_file.name, epoch)
import_lue_json(lue_json_file.name, lue_dataset_pathname)
lue.assert_is_valid(lue_dataset_pathname)
return lue_dataset_pathname
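# Illustrative sketch (not part of the original module): the epoch handling above
# reduces to whole seconds between two ISO-8601 time points, with the earliest
# benchmark start acting as the clock's epoch.
def _example_epoch_offset():
    epoch = dateutil.parser.isoparse("2020-01-01T00:00:00+00:00")
    start = dateutil.parser.isoparse("2020-01-01T00:05:30+00:00")
    return int((start - epoch).total_seconds())  # -> 330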
def meta_information_dataframe(
lue_meta_information):
# TODO: BUG: double delete
# lue_meta_information.properties["kind"].value[0:0]
assert \
lue_meta_information.properties["kind"].value[:] == ["partition_shape"]
name = lue_meta_information.properties["name"].value[:]
system_name = lue_meta_information.properties["system_name"].value[:]
scenario_name = lue_meta_information.properties["scenario_name"].value[:]
# # Pandas does not support nD array elements. Convert (each) shape
# # to string.
# assert array_shape.dtype == np.uint64
# array_shape = [str(shape) for shape in array_shape]
meta_information = pd.DataFrame({
"name": name,
"system_name": system_name,
"scenario_name": scenario_name,
})
return meta_information
def measurement_dataframe(
lue_measurement):
array_shape = lue_measurement.properties["array_shape"].value[:]
partition_shape = lue_measurement.properties["partition_shape"].value[:]
duration = lue_measurement.properties["duration"].value[:]
assert len(duration) == len(partition_shape) == len(array_shape)
# count durations per benchmark
duration = pd.DataFrame(duration)
# array shape per benchmark
array_shape = pd.DataFrame(array_shape)
# partition shape per benchmark
partition_shape = pd.DataFrame(partition_shape)
assert (duration.index == partition_shape.index).all()
assert (duration.index == array_shape.index).all()
measurement = pd.concat([array_shape, partition_shape, duration], axis=1)
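# Illustrative sketch (made-up shapes and durations, not real benchmark data):
# pd.concat(..., axis=1) pastes the three per-benchmark frames side by side, so a
# single row carries the array shape, the partition shape and all measured durations.
def _example_measurement_concat():
    array_shape = pd.DataFrame([[1000, 1000]])
    partition_shape = pd.DataFrame([[100, 100]])
    duration = pd.DataFrame([[12.5, 12.7, 12.6]])
    return pd.concat([array_shape, partition_shape, duration], axis=1)  # 1 row x 7 columns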
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 6 18:36:11 2018
@author: <NAME>
"""
from env import env
from rl_brain import agent_manager
import matplotlib.pyplot as plt
from tqdm import tqdm
import datetime
import pandas as pd
import numpy as np
import os
class trainer():
def __init__(self, num_stations, action_space, episode, threshold, collaboration):
# Performance Metrics
self.success_ratios = {}
self.team_cumulative_rewards = {}
self.timestamp = self.get_timestamp(True)
self.result_dir = "./performance_log/" + self.timestamp + "/graphs/"
if not os.path.exists(self.result_dir):
os.makedirs(self.result_dir)
# Env, Agent, and Key Attributes
self.env = env(num_stations, 50, threshold)
self.init_stocks = []
self.current_stocks = self.init_stocks
self.num_stations = num_stations
self.action_space = action_space
self.q_tables = []
self.merged_table = 0
self.mode = "learn"
self.collaboration = collaboration
self.threshold = threshold
for idx in range(num_stations):
self.init_stocks.append(50)
self.agent_manager = agent_manager(self.num_stations,
self.action_space, self.init_stocks, self.collaboration)
if type(episode) == int:
self.eps = [episode]
def run(self, mode):
'''
This function runs the agent-station training and testing procedures
using multithreading.
'''
print("==========================")
print("start {}ing sessions of {} stations ...".format(mode, self.num_stations))
self.mode = mode
self.success_ratios[mode] = []
self.team_cumulative_rewards[mode] = []
if self.mode == "test":
# Create new env and agents; run test workflow
self.env.eps_reset()
self.agent_manager.batch_reset(self.merged_table)
for num_eps in self.eps:
for eps in tqdm(range(num_eps)):
upload_flag = False
if num_eps % 500 == 0 and self.collaboration:
upload_flag = True
for hour in range(0, 24):
# Agent-Station Interaction with multi-threading
actions = self.agent_manager.batch_choose_action(self.current_stocks)
current_hour, old_stocks, new_stocks, rewards, day_end = self.env.ping(actions)
self.agent_manager.batch_learn(old_stocks, actions, rewards,
new_stocks, day_end, upload = upload_flag)
self.current_stocks = new_stocks
self.success_ratios[self.mode].append(self.env.cal_success_ratio())
self.team_cumulative_rewards[self.mode].append(self.agent_manager.get_team_rewards())
self.env.eps_reset()
self.agent_manager.eps_reset()
print("-------------------------")
self.agent_manager.save_q_tables(self.timestamp)
if self.mode == "learn":
self.q_tables, self.merged_table = self.agent_manager.get_q_tables()
def graph_performance(self, num_eps):
window = num_eps / 200
# Rolling Average of Group Success Ratio
fig = plt.figure(figsize = (10, 8))
learn_rolling_average = pd.Series(self.success_ratios["learn"]).rolling(window).mean()
test_rolling_average = pd.Series(self.success_ratios["test"]).rolling(window).mean()
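# Illustrative sketch (made-up ratios): .rolling(window).mean() smooths the
# per-episode success ratios before plotting, as done in graph_performance above.
def _example_rolling_average():
    ratios = pd.Series([0.2, 0.4, 0.6, 0.8, 1.0])
    return ratios.rolling(2).mean()  # NaN, 0.3, 0.5, 0.7, 0.9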
import datetime
import lightgbm as lgb
import numpy as np
import pandas as pd
from sklearn.model_selection import KFold
from models.tools import f1_score, f1_score_lgbm, load_data
# path
path_to_data = "data/"
path_to_submissions = "submissions/"
path_to_stacking = "stacking/"
path_to_plots = "models/plots/"
# tuned hyper-parameters
parameters = {
'task': 'train',
'boosting_type': 'gbdt',
'objective': 'binary',
# 'metric': {},
'num_leaves': 200,
'learning_rate': 0.1,
'feature_fraction': 0.5,
'bagging_fraction': 0.6,
'bagging_freq': 5,
'verbose': 0,
"min_data_in_leaf": 3,
"max_depth": 150
}
# used features
my_features_string = [
"date_diff",
"overlap_title",
"common_author",
# "score_1_2",
# "score_2_1",
"cosine_distance",
"journal_similarity",
# "overlapping_words_abstract",
# "jaccard",
# "adar",
"preferential_attachment",
# "resource_allocation_index",
"out_neighbors",
"in_neighbors",
"common_neighbors",
"shortest_path",
"popularity",
"common_successors",
"common_predecessors",
"paths_of_length_one",
"authors_citation",
"coauthor_score"
# "katz"
# "katz_2"
]
my_features_acronym = ["_".join(list(map(lambda x: x[0], string.split('_')))) for string in my_features_string]
# load data
(X_train,
X_test,
Y_train,
my_features_index,
my_features_dic) = load_data(my_features_string)
# print user info
now = datetime.datetime.now()
print("date: " + str(now))
print("features: " + str(my_features_string))
print("model: LGBM")
print("parameters:")
print(parameters)
print("cross validation:")
# instantiate Kfold and predictions placeholder
k = 5
kf = KFold(k)
predictions = np.zeros((X_test.shape[0], k))
predictions_train = np.zeros(X_train.shape[0])
i = 0
# for each fold store predictions on test set and print validation results
results = []
print('Start training...')
for train_index, test_index in kf.split(X_train):
lgb_train = lgb.Dataset(X_train[train_index], Y_train[train_index])
lgb_eval = lgb.Dataset(X_train[test_index], Y_train[test_index], reference=lgb_train)
gbm = lgb.train(parameters,
train_set=lgb_train,
num_boost_round=100,
valid_sets=lgb_eval,
verbose_eval=40,
feval=f1_score_lgbm
)
res = gbm.predict(X_test)
Y_pred = gbm.predict(X_train[test_index])
Y_pred_train = gbm.predict(X_train[train_index])
predictions[:, i] = res
predictions_train[test_index] = Y_pred
print("train: " + str(f1_score(Y_train[train_index], Y_pred_train.round())))
print("test: " + str(f1_score(Y_train[test_index], Y_pred.round())))
i += 1
# save submission file
Y_test = (np.sum(predictions, axis=1) > 2.5).astype(int)
submission = pd.DataFrame(Y_test)
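# Illustrative sketch (made-up fold scores): with k = 5 folds, an edge is labelled
# positive only when a majority of the fold models vote for it, i.e. the summed
# fold predictions exceed 2.5 out of 5, matching the threshold used above.
def _example_majority_vote():
    preds = np.array([[0.9, 0.8, 0.2, 0.7, 0.6]])      # one test edge, five fold scores
    return (np.sum(preds, axis=1) > 2.5).astype(int)   # -> array([1])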
#!/usr/bin/env python
"""
Parses SPINS' EA log files into BIDS tsvs.
Usage:
dm_parse_ea.py [options] <study>
Arguments:
<study> A datman study to parse task data for.
Options:
--experiment <experiment> Single datman session to generate TSVs for
--timings <timing_path> The full path to the EA timings file.
Defaults to the 'EA-timing.csv' file in
the assets folder.
--lengths <lengths_path> The full path to the file containing the
EA vid lengths. Defaults to the
'EA-vid-lengths.csv' in the assets folder.
--regex <regex> The regex to use to find the log files to
parse. [default: *UCLAEmpAcc*]
--debug Set log level to debug
"""
import re
import os
import glob
import logging
import pandas as pd
import numpy as np
from docopt import docopt
import datman.config
import datman.scanid
logging.basicConfig(
level=logging.WARN, format="[%(name)s] %(levelname)s: %(message)s"
)
logger = logging.getLogger(os.path.basename(__file__))
# reads in log file and subtracts the initial TRs/MRI startup time
def read_in_logfile(path):
log_file = pd.read_csv(path, sep="\t", skiprows=3)
time_to_subtract = int(log_file.Duration[log_file.Code == "MRI_start"])
log_file.Time = log_file.Time - time_to_subtract
return log_file
# Remove the rating when there is a scanner response during the task instead of just at the start
def clean_logfile(log_file):
scan_response = ["101", "104"]
# 1st list of indexes to remove scan responses and ratings in the dataframe
indexes_to_drop = []
# Remove the rating that come after the scan response when there is a 102/103 response right before or after
# Also remove the rating that come after scan response and carry over to the next video
# The rating is always registered two indexes after the scan response
for index, row in log_file.iterrows():
if ("rating" in log_file["Code"][index]) and any(
resp in log_file["Code"][index - 2] for resp in scan_response
):
# index to select the rating to drop
indexes_to_drop.append(index)
# index - 2 to select the scan response to drop
indexes_to_drop.append(index - 2)
if len(indexes_to_drop) == 0:
log_file_cleaned = log_file
else:
log_file_cleaned = log_file.drop(log_file.index[indexes_to_drop])
log_file_cleaned = log_file_cleaned.reset_index(drop=True)
logger.warning(
f"Removed {len(indexes_to_drop)/2} registered rating occurred before or after actual rating"
)
# 2nd list of indexes to drop the remaining scan responses and ratings
indexes_to_drop_1 = []
# Remove the remaining rating response come right after scan response
# The rating is registered one index after the scan response
for index, row in log_file_cleaned.iterrows():
if ("rating" in log_file_cleaned["Code"][index]) and any(
resp in log_file_cleaned["Code"][index - 1]
for resp in scan_response
):
# index to select the remaining rating to drop
indexes_to_drop_1.append(index)
# index - 1 select the remaing scan response to drop
indexes_to_drop_1.append(index - 1)
if len(indexes_to_drop_1) == 0:
final_log_file = log_file_cleaned
else:
final_log_file = log_file_cleaned.drop(
log_file_cleaned.index[indexes_to_drop_1]
)
final_log_file = final_log_file.reset_index(drop=True)
logger.warning(
f"Removed {len(indexes_to_drop_1)/2} rating registered followed scanner responses"
)
return final_log_file
# Grabs the starts of blocks and returns rows for them
def get_blocks(log, vid_info):
# identifies the video trial types (as opposed to button press events etc)
mask = ["vid" in log["Code"][i] for i in range(0, log.shape[0])]
df = pd.DataFrame(
{
"onset": log.loc[mask]["Time"],
"trial_type": log.loc[mask]["Event Type"],
"movie_name": log.loc[mask]["Code"],
}
)
df["trial_type"] = df["movie_name"].apply(
lambda x: "circle_block" if "cvid" in x else "EA_block"
)
df["duration"] = df["movie_name"].apply(
lambda x: int(vid_info[x]["duration"]) * 10000
if x in vid_info
else pd.NA
)
df["stim_file"] = df["movie_name"].apply(
lambda x: vid_info[x]["stim_file"] if x in vid_info else pd.NA
)
df["end"] = df["onset"] + df["duration"]
return df
def format_vid_info(vid):
vid.columns = [c.lower() for c in vid.columns]
vid = vid.rename(index={0: "stim_file", 1: "duration"})
vid = vid.to_dict()
return vid
def read_in_standard(timing_path):
df = pd.read_csv(timing_path).astype(str)
df.columns = [c.lower() for c in df.columns]
df_dict = df.drop([0, 0]).reset_index(drop=True).to_dict(orient="list")
return df_dict
def get_series_standard(gold_standard, block_name):
return [float(x) for x in gold_standard[block_name] if x != "nan"]
def get_ratings(log):
rating_mask = ["rating" in log["Code"][i] for i in range(0, log.shape[0])]
df = pd.DataFrame(
{
"onset": log["Time"].loc[rating_mask].values,
"participant_value": log.loc[rating_mask]["Code"].values,
"event_type": "button_press",
"duration": 0,
}
)
# Pull rating value from formatted string
df["participant_value"] = df["participant_value"].str.strip().str[-1]
return df
def combine_dfs(blocks, ratings):
# combines the block rows with the ratings rows and sorts them
combo = blocks.append(ratings).sort_values("onset").reset_index(drop=True)
mask = pd.notnull(combo["trial_type"])
combo["space_b4_prev"] = combo["onset"].diff(periods=1)
combo["first_button_press"] = combo["duration"].shift() > 0
combo2 = combo.drop(
combo[
(combo["space_b4_prev"] < 1000)
& (combo["first_button_press"] == True)
].index
).reset_index(drop=True)
mask = pd.notnull(combo2["trial_type"])
block_start_locs = combo2[mask].index.values
last_block = combo2.iloc[block_start_locs[len(block_start_locs) - 1]]
end_row = {
"onset": last_block.end,
"rating_duration": 0,
"event_type": "last_row",
"duration": 0,
"participant_value": last_block.participant_value,
}
combo2 = combo2.append(end_row, ignore_index=True).reset_index(drop=True)
mask = pd.notnull(combo2["trial_type"])
block_start_locs = combo2[mask].index.values
combo2["rating_duration"] = combo2["onset"].shift(-1) - combo2[
"onset"
].where(
mask == False
) # noqa: E712
for i in range(len(block_start_locs)):
if block_start_locs[i] != 0:
combo2.rating_duration[block_start_locs[i - 1]] = (
combo2.end[block_start_locs[i - 1]]
- combo2.onset[block_start_locs[i - 1]]
)
for i in block_start_locs:
new_row = {
"onset": combo2.onset[i],
"rating_duration": combo2.onset[i + 1] - combo2.onset[i],
"event_type": "default_rating",
"duration": 0,
"participant_value": 5,
}
combo2 = combo2.append(new_row, ignore_index=True)
combo2 = combo2.sort_values(
by=["onset", "event_type"], na_position="first"
).reset_index(drop=True)
return combo2
def block_scores(ratings_dict, combo):
"""
Compute Pearson correlation between gold standard ratings
and participant ratings
"""
list_of_rows = []
summary_vals = {}
mask = pd.notnull(combo["trial_type"])
block_start_locs = combo[mask].index.values
block_start_locs = np.append(
block_start_locs, combo.tail(1).index.values, axis=None
)
for idx in range(1, len(block_start_locs)):
block_start = combo.onset[block_start_locs[idx - 1]]
block_end = combo.end[block_start_locs[idx - 1]]
block = combo.iloc[block_start_locs[idx - 1] : block_start_locs[idx]][
pd.notnull(combo.event_type)
]
block_name = (
combo.movie_name.iloc[
block_start_locs[idx - 1] : block_start_locs[idx]
][pd.notnull(combo.movie_name)]
.reset_index(drop=True)
.astype(str)
.get(0)
)
gold = get_series_standard(ratings_dict, block_name)
if "cvid" in block_name:
interval = np.arange(
combo.onset[block_start_locs[idx - 1]],
combo.end[block_start_locs[idx - 1]],
step=40000,
)
else:
interval = np.arange(
combo.onset[block_start_locs[idx - 1]],
combo.end[block_start_locs[idx - 1]],
step=20000,
)
if len(gold) < len(interval):
interval = interval[: len(gold)]
logger.warning(
"gold standard is shorter than the number of pt "
f"ratings. pt ratings truncated, block: {block_name}",
)
if len(interval) < len(gold):
gold = gold[: len(interval)]
logger.warning(
"number of pt ratings is shorter than the number "
f"of gold std, gold std truncated, block: {block_name}",
)
# this is to append for the remaining fraction of a second (so that
# the loop goes to the end i guess...)- maybe i dont need to do this
interval = np.append(interval, block_end)
two_s_avg = []
for x in range(len(interval) - 1):
start = interval[x]
end = interval[x + 1]
sub_block = block[
block["onset"].between(start, end)
| block["onset"].between(start, end).shift(-1)
]
block_length = end - start
if len(sub_block) != 0:
ratings = []
for index, row in sub_block.iterrows():
if row.onset < start:
numerator = (row.onset + row.rating_duration) - start
else:
if (row.onset + row.rating_duration) <= end:
numerator = row.rating_duration
elif (row.onset + row.rating_duration) > end:
numerator = end - row.onset
else:
numerator = 999999 # add error here
if row.event_type != "last_row":
ratings.append(
{
"start": start,
"end": end,
"row_time": row.rating_duration,
"row_start": row.onset,
"block_length": block_length,
"rating": row.participant_value,
"time_held": numerator,
}
)
nums = [float(d["rating"]) for d in ratings]
times = [
float(d["time_held"]) / block_length
for d in ratings
]
avg = np.sum(np.multiply(nums, times))
last_row = row.participant_value
else:
avg = last_row
two_s_avg.append(float(avg))
list_of_rows.append(
{
"event_type": "running_avg",
"participant_value": float(avg),
"onset": start,
"duration": end - start,
"gold_std": gold[x],
}
)
n_button_press = len(block[block.event_type == "button_press"].index)
block_score = np.corrcoef(gold, two_s_avg)[1][0]
key = str(block_name)
summary_vals.update(
{
key: {
"n_button_press": int(n_button_press),
"block_score": block_score,
"onset": block_start,
"duration": block_end - block_start,
}
}
)
return list_of_rows, summary_vals
def outputs_exist(log_file, output_path):
if not os.path.exists(output_path):
return False
if os.path.getmtime(output_path) < os.path.getmtime(log_file):
logger.error(
"Output file is less recently modified than its task file"
f" {log_file}. Output will be deleted and regenerated."
)
try:
os.remove(output_path)
except Exception as e:
logger.error(
f"Failed to remove output file {output_path}, cannot "
f"regenerate. Reason - {e}"
)
return True
return False
return True
def get_output_path(ident, log_file, dest_dir):
try:
os.makedirs(dest_dir)
except FileExistsError:
pass
part = re.findall(r"((?:part|RUN)\d).log", log_file)
if not part:
logger.error(
f"Can't detect which part task file {log_file} "
"corresponds to. Ignoring file."
)
return
else:
part = part[0]
return os.path.join(dest_dir, f"{ident}_EAtask_{part}.tsv")
def parse_task(ident, log_file, dest_dir, length_file, timing_file):
output_path = get_output_path(ident, log_file, dest_dir)
if outputs_exist(log_file, output_path):
return
# Reads in and clean the log, skipping the first three preamble lines
try:
log = read_in_logfile(log_file)
log_cleaned = clean_logfile(log)
except Exception as e:
logger.error(
f"Cannot parse {log_file}! File maybe corrupted! Skipping"
)
return
vid_in = pd.read_csv(length_file)
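# Illustrative sketch (made-up ratings): block_scores above scores each video block
# as the Pearson correlation between the gold-standard ratings and the participant's
# time-weighted two-second averages, via np.corrcoef.
def _example_block_score():
    gold = [4.0, 5.0, 6.0, 7.0]
    participant = [4.2, 5.1, 5.9, 7.3]
    return np.corrcoef(gold, participant)[1][0]  # close to 1.0 when ratings track the gold standard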
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.html
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Testing hwm_allocation() with bookings in natural order.
import unittest
from imscommon.es.ims_esclient import ESClient
from pyspark.sql import HiveContext
from pyspark import SparkContext, SparkConf
import optimizer.util
import pandas
from pandas.testing import assert_frame_equal
import optimizer.algo.hwm
import os
import json
import warnings
class Unittest_HWM_Allocations_2(unittest.TestCase):
def setUp(self):
warnings.simplefilter("ignore", ResourceWarning)
fpath = os.path.abspath(os.path.join(os.path.dirname(__file__),".."))
with open(fpath + '/data_source/bookings_fully_overlapped.json') as bookings_source:
self.bookings = json.load(bookings_source)
with open(fpath + '/data_source/cfg.json') as cfg_source:
self.cfg = json.load(cfg_source)
today = '20180402'
self.days = optimizer.util.get_days_from_bookings(today, self.bookings)
self.sc = SparkContext.getOrCreate()
self.hive_context = HiveContext(self.sc)
self.schema = optimizer.util.get_common_pyspark_schema()
def compare_two_dfs(self, pandas_df_expected, df_to_test_rows):
df = self.hive_context.createDataFrame(df_to_test_rows, self.schema)
df_allocated = optimizer.algo.hwm.hwm_allocation(df, self.bookings, self.days)
pandas_df_allocated = df_allocated.select("*").toPandas()
print(pandas_df_expected)
print(pandas_df_allocated)
return self.assertTrue(assert_frame_equal(pandas_df_expected, pandas_df_allocated, check_dtype=False) == None)
def test_hwm_allocation_case1(self):
pandas_df_expected = pandas.DataFrame(columns=['day', 'ands', 'minus', 'amount', 'allocated'])
pandas_df_expected.loc[0] = ['20180402', ['b1', 'b3', 'b2'], [], 733, {'b1': 500, 'b3': 233}]
df_to_test_rows = [(['20180402', ['b1', 'b3', 'b2'], [], {}, 733])]
return self.compare_two_dfs(pandas_df_expected, df_to_test_rows)
def test_hwm_allocation_case2(self):
pandas_df_expected = pandas.DataFrame(columns=['day', 'ands', 'minus', 'amount', 'allocated'])
import re
import json
import operator
import sys
import dateutil.parser as dp
import pandas as pd
from typing import List, Tuple, Set
from rltk import similarity
from tl.exceptions import TLException
import numpy as np
ccm_columns = ['type', 'score', 'property', 'row',
'col1', 'col1_item', 'col1_string', 'col2', 'col2_string', 'col2_item']
valid_property_types = {'i', 'd', 'q', 'e'}
class CellContextMatches:
"""
Contains the context matches for a single cell to all other cells in the same row.
This class contains all the triples for all candidates for a cell.
"""
def __init__(self,
row: str,
col: str,
):
"""
Create an empty CellContextMatches for a specific cell.
"""
self.row = row
self.col = col
# self.ccm = pd.DataFrame(columns=ccm_columns)
self.ccm = dict()
self.col1_items = set()
def add_triple(self,
row: str,
col1: str,
col1_item: str,
col1_string: str,
type: str,
score: float,
property: str,
col2: str,
col2_string: str,
best_match: str,
col2_item: str = None):
"""
Add a single triple to CellContextMatches.
"""
triple = {
'type': type,
'score': score,
'property': property,
'row': row,
'col1': col1,
'col1_item': col1_item,
'col1_string': col1_string,
'col2': col2,
'col2_string': col2_string,
'col2_item': col2_item,
'best_match': best_match
}
if col2 not in self.ccm:
self.ccm[col2] = list()
self.ccm[col2].append(triple)
self.col1_items.add(col1_item)
def has_candidate(self, col1_item: str):
"""
Returns True if the CellContextMatches contains information for a given q_node.
"""
return col1_item in self.col1_items
def get_triples(self):
"""
Return a list of all the triples
"""
out = []
for k in self.ccm:
triples = self.ccm[k]
out.extend(triples)
return out
def get_triples_to_column(self, col2: str):
"""
Return the triples to another column.
"""
if self.col == col2:
raise Exception(f'Cannot find context for a column with itself. col1: {self.col}, col2: {col2}')
return self.ccm.get(col2, [])
def get_properties(self, col2: str, q_node: str = None) -> List[Tuple[str, str, float, float, int]]:
"""
Returns a list of tuples (property, type, max score, avg score, number of occurrences)
-> [("P175", "i", 0.95, 0.87, 4), ...]
current_col
for col in range(0, max_columns):
for row in range (0, max_rows):
cc = tcm.get_cell_context(row, col)
props = cc.get_properties(3)
"""
if self.col == col2:
raise Exception(f'Cannot find context for a column with itself. col1: {self.col}, col2: {col2}')
result = []
col2_records = self.ccm.get(col2, [])
if q_node:
col2_records = [s for s in col2_records if s.get('col1_item') == q_node]
prop_count = {}
for record in col2_records:
property = record['property']
score = record['score']
if property not in prop_count:
prop_count[property] = {
'count': 0,
'max_score': -1.0,
'type': record['type'],
'sum': 0
}
prop_count[property]['count'] += 1
prop_count[property]['sum'] += score
if score > prop_count[property]['max_score']:
prop_count[property]['max_score'] = score
for property in prop_count:
number_of_occurences = prop_count[property]['count']
avg_score = prop_count[property]['sum'] / number_of_occurences
max_score = prop_count[property]['max_score']
result.append((property,
prop_count[property]['type'],
max_score,
avg_score,
number_of_occurences))
return result
class TableContextMatches:
"""
Contains all context matches for a table, for every cell that we have to link to all other cells in
the same row.
"""
def __init__(self,
context_path: str = None,
context_dict: dict = None,
input_df: pd.DataFrame = None,
input_path: str = None,
context_matches_path=None,
label_column: str = 'label_clean',
ignore_column: str = None,
relevant_properties_file: str = None,
use_relevant_properties: bool = False,
save_relevant_properties: bool = False,
string_similarity_threshold: float = 0.7,
quantity_similarity_threshold: float = 0.3,
output_column_name: str = "context_score"
):
"""
Maybe better to have a set of columns
Create a ContextMatches datastructure to store the context matches between columns in a row.
Each entry in the ContextMatches array is a list of dicts, where each dict contains
row, col1, col2, property, score, col1_item, col2_string and col2_item.
The internal data structure must return the matches between two columns in a row in constant time,
so the backing store must be a NumPy array.
"""
self.ignore_column = ignore_column
if self.ignore_column:
self.prefix_column_name = "ignore_"
else:
self.prefix_column_name = ""
self.row_col_label_dict = {}
if context_path is not None:
context_dict = self.read_context_file(context_path)
self.output_column_name = output_column_name
self.ccm_dict = {}
self.string_similarity_threshold = string_similarity_threshold
self.quantity_similarity_threshold = quantity_similarity_threshold
self.input_df = None
if input_path is not None:
input_df = pd.read_csv(input_path)
self.relevant_properties_file = relevant_properties_file
self.use_relevant_properties = use_relevant_properties
self.save_relevant_properties = save_relevant_properties
self.relevant_properties = {}
if use_relevant_properties:
self.relevant_properties = self.read_relevant_properties()
input_df['row'] = input_df['row'].astype('str')
input_df['column'] = input_df['column'].astype('str')
self.main_entity_column = self.find_main_entity_column(input_df, label_column)
self.initialize(input_df, context_dict, label_column)
if context_matches_path is not None:
self.load_from_disk(context_matches_path)
def read_relevant_properties(self) -> dict: # or whatever datastructure makes sense
if self.relevant_properties_file is None:
raise TLException('Please specify a valid path for relevant properties.')
relevant_properties_df = pd.read_csv(self.relevant_properties_file)
relevant_properties_group = relevant_properties_df.groupby(['column', 'col2'])
relevant_properties_dict = {}
for cell, group in relevant_properties_group:
column_column_pair = f"{cell[0]}_{cell[1]}"
all_properties = set(group['property_'].unique())
relevant_properties_dict[column_column_pair] = all_properties
return relevant_properties_dict
def write_relevant_properties(self, relevant_properties_df: pd.DataFrame):
if self.relevant_properties_file is None:
raise TLException('Please specify a valid path for relevant properties.')
relevant_properties_df.to_csv(self.relevant_properties_file, index=False)
def is_relevant_property(self, col1: str, col2: str, property: str) -> bool:
column_column_pair = f"{col1}_{col2}"
# Lookup the dictionary
if column_column_pair in self.relevant_properties:
column_relevant_properties = self.relevant_properties[column_column_pair]
if property in column_relevant_properties:
return True
return False
def find_main_entity_column(self, input_df, label_column) -> str:
col_labels_dict = {}
for col, gdf in input_df.groupby(by=['column']):
col_labels_dict[col] = len(gdf[label_column].unique())
max_cols = [key for (key, value) in col_labels_dict.items() if value == max(col_labels_dict.values())]
if len(max_cols) == 1:
return max_cols[0]
return '0'
def initialize(self, raw_input_df, context_dict, label_column):
raw_input_df['kg_labels'].fillna("", inplace=True)
raw_input_df['kg_aliases'].fillna("", inplace=True)
if self.ignore_column is not None:
_input_df = raw_input_df[(raw_input_df[self.ignore_column].astype(float) == 0)
& (raw_input_df['column'] == self.main_entity_column)]
not_ignored_rows = set(_input_df['row'].unique())
_input_df_2 = raw_input_df[
(raw_input_df['row'].isin(not_ignored_rows)) & (raw_input_df["column"] != self.main_entity_column)]
self.input_df = pd.concat([_input_df, _input_df_2])
not_ignored_indices = self.input_df.index
self.other_input_df = raw_input_df[~raw_input_df.index.isin(not_ignored_indices)]
assert (len(self.input_df) + len(self.other_input_df)) == len(raw_input_df)
else:
self.input_df = raw_input_df
self.other_input_df = None
rows = set(self.input_df['row'].unique())
columns = set(self.input_df['column'].unique())
row_column_pairs = set()
for row, col, label in zip(self.input_df['row'], self.input_df['column'], self.input_df[label_column]):
key = f"{row}_{col}"
row_column_pairs.add(key)
# row_column_label_dict stores only the row_column pairs that need to be matched
for row, col, context in zip(self.input_df['row'], self.input_df['column'], self.input_df['context']):
if col == '0':
context_vals = context.split('|')
for i, context_val in enumerate(context_vals):
context_column = i + 1
row_col_dict_key = f"{row}_{context_column}"
if row_col_dict_key not in self.row_col_label_dict:
try:
date = dp.parse(context_val)
context_val = str(date.year)
                        except Exception:  # context value is not a parseable date; keep it as-is
                            pass
self.row_col_label_dict[row_col_dict_key] = context_val
columns.add(str(context_column))
for row, col, kg_id, kg_id_label_str, kg_id_alias_str in zip(self.input_df['row'],
self.input_df['column'],
self.input_df['kg_id'],
self.input_df['kg_labels'],
self.input_df['kg_aliases']):
kg_id_context = context_dict.get(kg_id, None)
kg_labels = []
if kg_id_label_str and kg_id_label_str.strip() != "":
kg_labels.append(kg_id_label_str.strip())
if kg_id_alias_str and kg_id_alias_str.strip() != "":
kg_labels.append(kg_id_alias_str.strip())
kg_label_str = "|".join(kg_labels)
ccm_key = f"{row}_{col}"
if ccm_key not in self.ccm_dict:
self.ccm_dict[ccm_key] = CellContextMatches(row, col)
if kg_id_context is not None:
for col2 in columns:
if (col != col2) and (col == self.main_entity_column or col2 == self.main_entity_column):
ccm_key_2 = f"{row}_{col2}"
if ccm_key_2 not in self.ccm_dict:
self.ccm_dict[ccm_key_2] = CellContextMatches(row, col2)
context_results = self.compute_context_similarity(kg_id_context, col,
col2,
self.row_col_label_dict.get(f"{row}_{col2}",
None))
for context_result in context_results:
self.add_match(row=row,
col1=col,
col1_item=kg_id,
col1_string=kg_label_str,
col2=col2,
col2_item=context_result['col2_item'],
col2_string=context_result['col2_string'],
type=context_result['type'],
property=context_result['property'],
score=context_result['score'],
best_match=context_result['best_match']
)
self.input_df = self.process(row_column_pairs, columns)
def process(self, row_column_pairs: set, n_context_columns: set):
context_scores, properties, similarities = self.compute_context_scores(n_context_columns, row_column_pairs)
self.input_df[self.output_column_name] = context_scores
self.input_df[self.prefix_column_name + 'context_properties'] = properties
self.input_df[self.prefix_column_name + 'context_similarity'] = similarities
out = [self.input_df]
if self.other_input_df is not None:
out.append(self.other_input_df)
return pd.concat(out).fillna(0.0)
def correctness_of_candidate(self):
        # The number of matches is the number of candidates that were matched correctly (not yet implemented).
pass
def compute_context_scores(self, n_context_columns: set, row_column_pairs: set) -> (
List[int], List[str], List[int]):
self.compute_property_scores(row_column_pairs, n_context_columns)
context_score_list = []
context_property_list = []
context_similarity_list = []
for row, col, q_node in zip(self.input_df['row'], self.input_df['column'], self.input_df['kg_id']):
            # When several properties have equal similarity, loop over all of them
            # and keep the one with the highest similarity.
property_matched = []
similarity_matched = []
sum_of_properties = 0
r_c = f"{row}_{col}"
for col2 in n_context_columns:
if col2 != col and (col == self.main_entity_column or col2 == self.main_entity_column):
returned_properties = self.ccm_dict[r_c].get_properties(col2, q_node=q_node)
if not returned_properties:
continue
best_score = 0
property_ = None
for properties in returned_properties:
if properties[2] > best_score:
property_ = properties[0]
best_score = properties[2]
# if property_ not in current_relevant_properties: pass
property_matched.append(property_ + "(" + str(best_score) + ")")
similarity_matched.append(best_score)
sum_of_properties = sum_of_properties + best_score
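            # Squash the unbounded sum of the best per-column-pair scores into [0, 1):
            # context_score = 1 - 2 ** (-sum_of_properties).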
if sum_of_properties == 0:
context_score = 0
else:
context_score = (1 - 1 / pow(2, sum_of_properties))
context_score_list.append(context_score)
context_similarity_list.append(similarity_matched)
context_property_list.append(property_matched)
return context_score_list, context_property_list, context_similarity_list
def compute_property_scores(self, row_column_pairs: set, n_context_columns: set):
properties_df_list = []
for r_c in row_column_pairs:
row_col = r_c.split("_")
row = row_col[0]
col = row_col[1]
for col2 in n_context_columns:
if (col2 != col) and (col2 == self.main_entity_column or col == self.main_entity_column):
m = self.ccm_dict[r_c].get_properties(col2)
int_prop = pd.DataFrame(m, columns=["property_", "type", "best_score", "avg_score", "n_occurences"])
int_prop['row'] = row
int_prop['column'] = col
int_prop['col2'] = col2
properties_df_list.append(int_prop)
properties_df = pd.concat(properties_df_list)
property_value_list = []
grouped_obj = properties_df.groupby(['column', 'col2', 'property_'])
for cell, group in grouped_obj:
property_score = (group['avg_score'].sum(axis=0))
property_value_list.append([cell[2], cell[0], cell[1], property_score])
property_value_df = | pd.DataFrame(property_value_list, columns=['property_', 'column', 'col2', 'property_score']) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import timedelta
import operator
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.compat import long
from pandas.core import ops
from pandas.errors import NullFrequencyError, PerformanceWarning
from pandas._libs.tslibs import IncompatibleFrequency
from pandas import (
timedelta_range,
Timedelta, Timestamp, NaT, Series, TimedeltaIndex, DatetimeIndex)
# ------------------------------------------------------------------
# Fixtures
@pytest.fixture
def tdser():
"""
Return a Series with dtype='timedelta64[ns]', including a NaT.
"""
return Series(['59 Days', '59 Days', 'NaT'], dtype='timedelta64[ns]')
@pytest.fixture(params=[pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)],
ids=lambda x: type(x).__name__)
def delta(request):
"""
Several ways of representing two hours
"""
return request.param
@pytest.fixture(params=[timedelta(minutes=5, seconds=4),
Timedelta('5m4s'),
Timedelta('5m4s').to_timedelta64()],
ids=lambda x: type(x).__name__)
def scalar_td(request):
"""
Several variants of Timedelta scalars representing 5 minutes and 4 seconds
"""
return request.param
@pytest.fixture(params=[pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
def box(request):
"""
Several array-like containers that should have effectively identical
behavior with respect to arithmetic operations.
"""
return request.param
@pytest.fixture(params=[pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(strict=True))],
ids=lambda x: x.__name__)
def box_df_fail(request):
"""
Fixture equivalent to `box` fixture but xfailing the DataFrame case.
"""
return request.param
# ------------------------------------------------------------------
# Numeric dtypes Arithmetic with Timedelta Scalar
class TestNumericArraylikeArithmeticWithTimedeltaScalar(object):
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="block.eval incorrect",
strict=True))
])
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 11)),
pd.UInt64Index(range(1, 11)),
pd.Float64Index(range(1, 11)),
pd.RangeIndex(1, 11)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_mul_tdscalar(self, scalar_td, index, box):
# GH#19333
if (box is Series and
type(scalar_td) is timedelta and index.dtype == 'f8'):
raise pytest.xfail(reason="Cannot multiply timedelta by float")
expected = timedelta_range('1 days', '10 days')
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = index * scalar_td
tm.assert_equal(result, expected)
commute = scalar_td * index
tm.assert_equal(commute, expected)
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 3)),
pd.UInt64Index(range(1, 3)),
pd.Float64Index(range(1, 3)),
pd.RangeIndex(1, 3)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_rdiv_tdscalar(self, scalar_td, index, box):
if box is Series and type(scalar_td) is timedelta:
raise pytest.xfail(reason="TODO: Figure out why this case fails")
if box is pd.DataFrame and isinstance(scalar_td, timedelta):
raise pytest.xfail(reason="TODO: Figure out why this case fails")
expected = TimedeltaIndex(['1 Day', '12 Hours'])
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = scalar_td / index
tm.assert_equal(result, expected)
with pytest.raises(TypeError):
index / scalar_td
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
class TestTimedeltaArraylikeAddSubOps(object):
# Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__
# -------------------------------------------------------------
# Invalid Operations
def test_td64arr_add_str_invalid(self, box):
# GH#13624
tdi = TimedeltaIndex(['1 day', '2 days'])
tdi = tm.box_expected(tdi, box)
with pytest.raises(TypeError):
tdi + 'a'
with pytest.raises(TypeError):
'a' + tdi
@pytest.mark.parametrize('other', [3.14, np.array([2.0, 3.0])])
@pytest.mark.parametrize('op', [operator.add, ops.radd,
operator.sub, ops.rsub],
ids=lambda x: x.__name__)
def test_td64arr_add_sub_float(self, box, op, other):
tdi = TimedeltaIndex(['-1 days', '-1 days'])
tdi = tm.box_expected(tdi, box)
if box is pd.DataFrame and op in [operator.add, operator.sub]:
pytest.xfail(reason="Tries to align incorrectly, "
"raises ValueError")
with pytest.raises(TypeError):
op(tdi, other)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to cast df to "
"Period",
strict=True,
raises=IncompatibleFrequency))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('freq', [None, 'H'])
def test_td64arr_sub_period(self, box, freq):
# GH#13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
idx = TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
idx = tm.box_expected(idx, box)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="broadcasts along "
"wrong axis",
raises=ValueError,
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('pi_freq', ['D', 'W', 'Q', 'H'])
@pytest.mark.parametrize('tdi_freq', [None, 'H'])
def test_td64arr_sub_pi(self, box, tdi_freq, pi_freq):
# GH#20049 subtracting PeriodIndex should raise TypeError
tdi = TimedeltaIndex(['1 hours', '2 hours'], freq=tdi_freq)
dti = Timestamp('2018-03-07 17:16:40') + tdi
pi = dti.to_period(pi_freq)
# TODO: parametrize over box for pi?
tdi = tm.box_expected(tdi, box)
with pytest.raises(TypeError):
tdi - pi
# -------------------------------------------------------------
# Binary operations td64 arraylike and datetime-like
def test_td64arr_sub_timestamp_raises(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
idx = tm.box_expected(idx, box)
msg = "cannot subtract a datelike from|Could not operate"
with tm.assert_raises_regex(TypeError, msg):
idx - Timestamp('2011-01-01')
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_add_timestamp(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
result = idx + Timestamp('2011-01-01')
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype",
strict=True))
], ids=lambda x: x.__name__)
def test_td64_radd_timestamp(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
# TODO: parametrize over scalar datetime types?
result = Timestamp('2011-01-01') + idx
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype "
"instead of "
"datetime64[ns]",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_add_sub_timestamp(self, box):
# GH#11925
ts = Timestamp('2012-01-01')
# TODO: parametrize over types of datetime scalar?
tdser = Series(timedelta_range('1 day', periods=3))
expected = Series(pd.date_range('2012-01-02', periods=3))
tdser = tm.box_expected(tdser, box)
expected = tm.box_expected(expected, box)
tm.assert_equal(ts + tdser, expected)
tm.assert_equal(tdser + ts, expected)
expected2 = Series(pd.date_range('2011-12-31',
periods=3, freq='-1D'))
expected2 = tm.box_expected(expected2, box)
tm.assert_equal(ts - tdser, expected2)
tm.assert_equal(ts + (-tdser), expected2)
with pytest.raises(TypeError):
tdser - ts
def test_tdi_sub_dt64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) - tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with pytest.raises(TypeError):
tdi - dtarr
# TimedeltaIndex.__rsub__
result = dtarr - tdi
tm.assert_equal(result, expected)
def test_tdi_add_dt64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) + tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi + dtarr
tm.assert_equal(result, expected)
result = dtarr + tdi
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# Operations with int-like others
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_add_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
tdser + Series([2, 3, 4])
@pytest.mark.parametrize('box', [
pd.Index,
pytest.param(Series,
marks=pytest.mark.xfail(reason="GH#19123 integer "
"interpreted as "
"nanoseconds",
strict=True)),
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_radd_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
Series([2, 3, 4]) + tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_sub_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
tdser - Series([2, 3, 4])
@pytest.mark.xfail(reason='GH#19123 integer interpreted as nanoseconds',
strict=True)
def test_td64arr_rsub_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
with pytest.raises(TypeError):
Series([2, 3, 4]) - tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_add_intlike(self, box):
# GH#19123
tdi = TimedeltaIndex(['59 days', '59 days', 'NaT'])
ser = tm.box_expected(tdi, box)
err = TypeError if box is not pd.Index else NullFrequencyError
other = Series([20, 30, 40], dtype='uint8')
# TODO: separate/parametrize
with pytest.raises(err):
ser + 1
with pytest.raises(err):
ser - 1
with pytest.raises(err):
ser + other
with pytest.raises(err):
ser - other
with pytest.raises(err):
ser + np.array(other)
with pytest.raises(err):
ser - np.array(other)
with pytest.raises(err):
ser + pd.Index(other)
with pytest.raises(err):
ser - pd.Index(other)
@pytest.mark.parametrize('scalar', [1, 1.5, np.array(2)])
def test_td64arr_add_sub_numeric_scalar_invalid(self, box, scalar, tdser):
if box is pd.DataFrame and isinstance(scalar, np.ndarray):
# raises ValueError
pytest.xfail(reason="DataFrame to broadcast incorrectly")
tdser = tm.box_expected(tdser, box)
err = TypeError
if box is pd.Index and not isinstance(scalar, float):
err = NullFrequencyError
with pytest.raises(err):
tdser + scalar
with pytest.raises(err):
scalar + tdser
with pytest.raises(err):
tdser - scalar
with pytest.raises(err):
scalar - tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vec', [
np.array([1, 2, 3]),
pd.Index([1, 2, 3]),
Series([1, 2, 3])
# TODO: Add DataFrame in here?
], ids=lambda x: type(x).__name__)
def test_td64arr_add_sub_numeric_arr_invalid(self, box, vec, dtype, tdser):
if type(vec) is Series and not dtype.startswith('float'):
pytest.xfail(reason='GH#19123 integer interpreted as nanos')
tdser = tm.box_expected(tdser, box)
err = TypeError
if box is pd.Index and not dtype.startswith('float'):
err = NullFrequencyError
vector = vec.astype(dtype)
# TODO: parametrize over these four ops?
with pytest.raises(err):
tdser + vector
with pytest.raises(err):
vector + tdser
with pytest.raises(err):
tdser - vector
with pytest.raises(err):
vector - tdser
# ------------------------------------------------------------------
# Operations with timedelta-like others
def test_td64arr_add_td64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 2 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi + tdarr
tm.assert_equal(result, expected)
result = tdarr + tdi
tm.assert_equal(result, expected)
def test_td64arr_sub_td64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 0 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi - tdarr
tm.assert_equal(result, expected)
result = tdarr - tdi
tm.assert_equal(result, expected)
# TODO: parametrize over [add, sub, radd, rsub]?
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to broadcast "
"incorrectly leading "
"to alignment error",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('names', [(None, None, None),
('Egon', 'Venkman', None),
('NCC1701D', 'NCC1701D', 'NCC1701D')])
def test_td64arr_add_sub_tdi(self, box, names):
# GH#17250 make sure result dtype is correct
# GH#19043 make sure names are propagated correctly
tdi = TimedeltaIndex(['0 days', '1 day'], name=names[0])
ser = Series([Timedelta(hours=3), Timedelta(hours=4)], name=names[1])
expected = Series([Timedelta(hours=3), Timedelta(days=1, hours=4)],
name=names[2])
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
result = tdi + ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser + tdi
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
expected = Series([Timedelta(hours=-3), Timedelta(days=1, hours=-4)],
name=names[2])
expected = tm.box_expected(expected, box)
result = tdi - ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser - tdi
tm.assert_equal(result, -expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
def test_td64arr_sub_NaT(self, box):
# GH#18808
ser = Series([NaT, Timedelta('1s')])
expected = Series([NaT, NaT], dtype='timedelta64[ns]')
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
res = ser - pd.NaT
tm.assert_equal(res, expected)
def test_td64arr_add_timedeltalike(self, delta, box):
# only test adding/sub offsets as + is now numeric
if box is pd.DataFrame and isinstance(delta, pd.DateOffset):
pytest.xfail(reason="Returns object dtype instead of m8[ns]")
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng + delta
tm.assert_equal(result, expected)
def test_td64arr_sub_timedeltalike(self, delta, box):
# only test adding/sub offsets as - is now numeric
if box is pd.DataFrame and isinstance(delta, pd.DateOffset):
pytest.xfail(reason="Returns object dtype instead of m8[ns]")
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng - delta
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# __add__/__sub__ with DateOffsets and arrays of DateOffsets
@pytest.mark.parametrize('box', [
pd.Index,
pytest.param(Series,
marks=pytest.mark.xfail(reason="Index fails to return "
"NotImplemented on "
"reverse op",
strict=True)),
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_add_offset_index(self, names, box):
# GH#18849, GH#19744
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected = TimedeltaIndex([tdi[n] + other[n] for n in range(len(tdi))],
freq='infer', name=names[2])
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi + other
tm.assert_equal(res, expected)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + tdi
tm.assert_equal(res2, expected)
# TODO: combine with test_td64arr_add_offset_index by parametrizing
# over second box?
def test_td64arr_add_offset_array(self, box_df_fail):
# GH#18849
box = box_df_fail # tries to broadcast incorrectly
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
expected = TimedeltaIndex([tdi[n] + other[n] for n in range(len(tdi))],
freq='infer')
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi + other
tm.assert_equal(res, expected)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + tdi
tm.assert_equal(res2, expected)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_sub_offset_index(self, names, box_df_fail):
# GH#18824, GH#19744
box = box_df_fail # tries to broadcast incorrectly
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected = TimedeltaIndex([tdi[n] - other[n] for n in range(len(tdi))],
freq='infer', name=names[2])
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi - other
tm.assert_equal(res, expected)
def test_td64arr_sub_offset_array(self, box_df_fail):
# GH#18824
box = box_df_fail # tries to broadcast incorrectly
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
expected = TimedeltaIndex([tdi[n] - other[n] for n in range(len(tdi))],
freq='infer')
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi - other
tm.assert_equal(res, expected)
@pytest.mark.parametrize('box', [
pd.Index,
pytest.param(Series,
marks=pytest.mark.xfail(reason="object dtype Series "
"fails to return "
"NotImplemented",
strict=True, raises=TypeError)),
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="tries to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_with_offset_series(self, names, box):
# GH#18849
box2 = Series if box is pd.Index else box
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = Series([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected_add = Series([tdi[n] + other[n] for n in range(len(tdi))],
name=names[2])
tdi = tm.box_expected(tdi, box)
expected_add = tm.box_expected(expected_add, box2)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi + other
tm.assert_equal(res, expected_add)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + tdi
tm.assert_equal(res2, expected_add)
# TODO: separate/parametrize add/sub test?
expected_sub = Series([tdi[n] - other[n] for n in range(len(tdi))],
name=names[2])
expected_sub = tm.box_expected(expected_sub, box2)
with tm.assert_produces_warning(PerformanceWarning):
res3 = tdi - other
tm.assert_equal(res3, expected_sub)
@pytest.mark.parametrize('obox', [np.array, pd.Index, pd.Series])
def test_td64arr_addsub_anchored_offset_arraylike(self, obox, box_df_fail):
# GH#18824
box = box_df_fail # DataFrame tries to broadcast incorrectly
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
tdi = tm.box_expected(tdi, box)
anchored = obox([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
# addition/subtraction ops with anchored offsets should issue
# a PerformanceWarning and _then_ raise a TypeError.
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
tdi + anchored
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
anchored + tdi
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
tdi - anchored
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
anchored - tdi
class TestTimedeltaArraylikeMulDivOps(object):
# Tests for timedelta64[ns]
# __mul__, __rmul__, __div__, __rdiv__, __floordiv__, __rfloordiv__
# ------------------------------------------------------------------
# Multiplication
# organized with scalar others first, then array-like
def test_td64arr_mul_int(self, box_df_fail):
box = box_df_fail # DataFrame op returns object instead of m8[ns]
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
result = idx * 1
tm.assert_equal(result, idx)
result = 1 * idx
tm.assert_equal(result, idx)
def test_td64arr_mul_tdlike_scalar_raises(self, delta, box):
if box is pd.DataFrame and not isinstance(delta, pd.DateOffset):
pytest.xfail(reason="returns m8[ns] instead of raising")
rng = timedelta_range('1 days', '10 days', name='foo')
rng = tm.box_expected(rng, box)
with pytest.raises(TypeError):
rng * delta
def test_tdi_mul_int_array_zerodim(self, box_df_fail):
box = box_df_fail # DataFrame op returns object dtype
rng5 = np.arange(5, dtype='int64')
idx = TimedeltaIndex(rng5)
expected = TimedeltaIndex(rng5 * 5)
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
result = idx * np.array(5, dtype='int64')
tm.assert_equal(result, expected)
def test_tdi_mul_int_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
rng5 = np.arange(5, dtype='int64')
idx = TimedeltaIndex(rng5)
expected = TimedeltaIndex(rng5 ** 2)
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
result = idx * rng5
tm.assert_equal(result, expected)
def test_tdi_mul_int_series(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
expected = TimedeltaIndex(np.arange(5, dtype='int64') ** 2)
idx = tm.box_expected(idx, box)
box2 = pd.Series if box is pd.Index else box
expected = tm.box_expected(expected, box2)
result = idx * pd.Series(np.arange(5, dtype='int64'))
tm.assert_equal(result, expected)
def test_tdi_mul_float_series(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
rng5f = np.arange(5, dtype='float64')
expected = TimedeltaIndex(rng5f * (rng5f + 0.1))
box2 = pd.Series if box is pd.Index else box
expected = tm.box_expected(expected, box2)
result = idx * Series(rng5f + 0.1)
tm.assert_equal(result, expected)
# TODO: Put Series/DataFrame in others?
@pytest.mark.parametrize('other', [
np.arange(1, 11),
pd.Int64Index(range(1, 11)),
pd.UInt64Index(range(1, 11)),
pd.Float64Index(range(1, 11)),
pd.RangeIndex(1, 11)
], ids=lambda x: type(x).__name__)
def test_tdi_rmul_arraylike(self, other, box_df_fail):
        # RangeIndex fails to return NotImplemented; for the other index types,
        # DataFrame tries to broadcast incorrectly
box = box_df_fail
tdi = TimedeltaIndex(['1 Day'] * 10)
expected = timedelta_range('1 days', '10 days')
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = other * tdi
tm.assert_equal(result, expected)
commute = tdi * other
tm.assert_equal(commute, expected)
# ------------------------------------------------------------------
# __div__
def test_td64arr_div_nat_invalid(self, box_df_fail):
# don't allow division by NaT (maybe could in the future)
box = box_df_fail # DataFrame returns all-NaT instead of raising
rng = timedelta_range('1 days', '10 days', name='foo')
rng = tm.box_expected(rng, box)
with pytest.raises(TypeError):
rng / pd.NaT
def test_td64arr_div_int(self, box_df_fail):
box = box_df_fail # DataFrame returns object dtype instead of m8[ns]
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
result = idx / 1
tm.assert_equal(result, idx)
def test_tdi_div_tdlike_scalar(self, delta, box_df_fail):
box = box_df_fail # DataFrame op returns m8[ns] instead of float64
rng = timedelta_range('1 days', '10 days', name='foo')
expected = pd.Float64Index((np.arange(10) + 1) * 12, name='foo')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng / delta
tm.assert_equal(result, expected)
def test_tdi_div_tdlike_scalar_with_nat(self, delta, box_df_fail):
box = box_df_fail # DataFrame op returns m8[ns] instead of float64
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = pd.Float64Index([12, np.nan, 24], name='foo')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng / delta
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# __floordiv__, __rfloordiv__
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Incorrectly returns "
"m8[ns] instead of f8",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_floordiv_tdscalar(self, box, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([0, 0, np.nan])
td1 = tm.box_expected(td1, box)
expected = tm.box_expected(expected, box)
result = td1 // scalar_td
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Incorrectly casts to f8",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_rfloordiv_tdscalar(self, box, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([1, 1, np.nan])
td1 = tm.box_expected(td1, box)
expected = tm.box_expected(expected, box)
result = scalar_td // td1
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns m8[ns] dtype "
"instead of f8",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_rfloordiv_tdscalar_explicit(self, box, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([1, 1, np.nan])
td1 = tm.box_expected(td1, box)
expected = tm.box_expected(expected, box)
# We can test __rfloordiv__ using this syntax,
# see `test_timedelta_rfloordiv`
result = td1.__rfloordiv__(scalar_td)
tm.assert_equal(result, expected)
def test_td64arr_floordiv_int(self, box_df_fail):
box = box_df_fail # DataFrame returns object dtype
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
result = idx // 1
| tm.assert_equal(result, idx) | pandas.util.testing.assert_equal |
# Author: <NAME> <<EMAIL>>
#
# License: Apache Software License 2.0
"""Tests concerning :class:`nannyml.metadata.regression.RegressionMetadata`."""
from typing import Tuple
import pandas as pd
import pytest
from nannyml.datasets import load_synthetic_binary_classification_dataset
from nannyml.metadata import FeatureType, ModelType, RegressionMetadata, extract_metadata
from nannyml.metadata.base import (
NML_METADATA_PARTITION_COLUMN_NAME,
NML_METADATA_TARGET_COLUMN_NAME,
NML_METADATA_TIMESTAMP_COLUMN_NAME,
)
from nannyml.metadata.regression import NML_METADATA_PREDICTION_COLUMN_NAME, _guess_predictions
@pytest.fixture
def data() -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]: # noqa: D103
ref_df, ana_df, tgt_df = load_synthetic_binary_classification_dataset() # TODO create new regression sample data
return ref_df, ana_df, tgt_df
@pytest.fixture
def metadata(data) -> RegressionMetadata: # noqa: D103
md = extract_metadata(data[0], model_type='regression')
md.target_column_name = 'work_home_actual'
return md
def test_model_metadata_creation_with_defaults_has_correct_properties(): # noqa: D103
sut = RegressionMetadata()
assert sut.name is None
assert sut.model_type == ModelType.REGRESSION
assert sut.features is not None
assert len(sut.features) == 0
assert sut.prediction_column_name is None
assert sut.target_column_name == 'target'
assert sut.partition_column_name == 'partition'
assert sut.timestamp_column_name == 'date'
def test_model_metadata_creation_sets_correct_values_for_special_attributes(): # noqa: D103
sut = RegressionMetadata(prediction_column_name='pred')
assert sut.prediction_column_name == 'pred'
def test_to_dict_contains_all_properties(metadata): # noqa: D103
sut = metadata.to_dict()
assert sut['prediction_column_name'] == 'y_pred'
def test_to_pd_contains_all_properties(metadata): # noqa: D103
sut = metadata.to_df()
assert sut.loc[sut['label'] == 'prediction_column_name', 'column_name'].iloc[0] == 'y_pred'
@pytest.mark.parametrize(
'col,expected',
[
('p', True),
('y_pred', True),
('pred', True),
('prediction', True),
('out', True),
('output', True),
('nope', False),
],
)
def test_guess_predictions_yields_correct_results(col, expected): # noqa: D103
    sut = _guess_predictions(data=pd.DataFrame(columns=[col]))
    assert (col in sut) == expected
"""General utility functions that are used in a variety of contexts.
The functions in this module are used in various stages of the ETL and post-etl
processes. They are usually not dataset specific, but not always. If a function
is designed to be used as a general purpose tool, applicable in multiple
scenarios, it should probably live here. There are lost of transform type
functions in here that help with cleaning and restructing dataframes.
"""
import itertools
import logging
import pathlib
import re
import shutil
from collections import defaultdict
from functools import partial
from importlib import resources
from io import BytesIO
from typing import Any, DefaultDict, Dict, List, Optional, Set, Union
import addfips
import numpy as np
import pandas as pd
import requests
import sqlalchemy as sa
from pudl.metadata.classes import DataSource, Package
from pudl.metadata.fields import apply_pudl_dtypes, get_pudl_dtypes
logger = logging.getLogger(__name__)
sum_na = partial(pd.Series.sum, skipna=False)
"""A sum function that returns NA if the Series includes any NA values.
In many of our aggregations we need to override the default behavior of treating
NA values as if they were zero. E.g. when calculating the heat rates of
generation units, if there are some months where fuel consumption is reported
as NA, but electricity generation is reported normally, then the fuel
consumption for the year needs to be NA, otherwise we'll get unrealistic heat
rates.
"""
def label_map(
df: pd.DataFrame,
from_col: str = "code",
to_col: str = "label",
null_value: Union[str, type(pd.NA)] = pd.NA,
) -> DefaultDict[str, Union[str, type(pd.NA)]]:
"""Build a mapping dictionary from two columns of a labeling / coding dataframe.
These dataframes document the meanings of the codes that show up in much of the
originally reported data. They're defined in :mod:`pudl.metadata.codes`. This
function is mostly used to build maps that can translate the hard to understand
short codes into longer human-readable codes.
Args:
df: The coding / labeling dataframe. Must contain columns ``from_col``
and ``to_col``.
from_col: Label of column containing the existing codes to be replaced.
to_col: Label of column containing the new codes to be swapped in.
        null_value: Default (Null) value to map to when a value that doesn't
            appear in ``from_col`` is encountered.
Returns:
A mapping dictionary suitable for use with :meth:`pandas.Series.map`.
"""
return defaultdict(
lambda: null_value,
df.loc[:, [from_col, to_col]]
.drop_duplicates(subset=[from_col])
.to_records(index=False),
)
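# A minimal sketch of label_map() in use, with a made-up coding dataframe (the real
# coding tables live in pudl.metadata.codes):
#
#     codes = pd.DataFrame({"code": ["NG", "BIT"], "label": ["natural_gas", "bituminous_coal"]})
#     fuel_map = label_map(codes)
#     pd.Series(["NG", "BIT", "???"]).map(fuel_map)
#     # -> ["natural_gas", "bituminous_coal", <NA>]; unknown codes fall back to null_value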
def find_new_ferc1_strings(
table: str,
field: str,
strdict: Dict[str, List[str]],
ferc1_engine: sa.engine.Engine,
) -> Set[str]:
"""Identify as-of-yet uncategorized freeform strings in FERC Form 1.
Args:
table: Name of the FERC Form 1 DB to search.
field: Name of the column in that table to search.
strdict: A string cleaning dictionary. See
e.g. `pudl.transform.ferc1.FUEL_UNIT_STRINGS`
ferc1_engine: SQL Alchemy DB connection engine for the FERC Form 1 DB.
Returns:
        Any string found in the searched table + field that was not part of any of
        the categories enumerated in strdict.
"""
all_strings = set(
pd.read_sql(f"SELECT {field} FROM {table};", ferc1_engine).pipe( # nosec
simplify_strings, columns=[field]
)[field]
)
old_strings = set.union(*[set(strings) for strings in strdict.values()])
return all_strings.difference(old_strings)
def find_foreign_key_errors(dfs: Dict[str, pd.DataFrame]) -> List[Dict[str, Any]]:
"""Report foreign key violations from a dictionary of dataframes.
The database schema to check against is generated based on the names of the
dataframes (keys of the dictionary) and the PUDL metadata structures.
Args:
dfs: Keys are table names, and values are dataframes ready for loading
into the SQLite database.
Returns:
A list of dictionaries, each one pertains to a single database table
in which a foreign key constraint violation was found, and it includes
the table name, foreign key definition, and the elements of the
dataframe that violated the foreign key constraint.
"""
package = Package.from_resource_ids(resource_ids=tuple(sorted(dfs)))
errors = []
for resource in package.resources:
for foreign_key in resource.schema.foreign_keys:
x = dfs[resource.name][foreign_key.fields]
y = dfs[foreign_key.reference.resource][foreign_key.reference.fields]
ncols = x.shape[1]
idx = range(ncols)
xx, yy = x.set_axis(idx, axis=1), y.set_axis(idx, axis=1)
if ncols == 1:
# Faster check for single-field foreign key
invalid = ~(xx[0].isin(yy[0]) | xx[0].isna())
else:
invalid = ~(
pd.concat([yy, xx]).duplicated().iloc[len(yy) :]
| xx.isna().any(axis=1)
)
if invalid.any():
errors.append(
{
"resource": resource.name,
"foreign_key": foreign_key,
"invalid": x[invalid],
}
)
return errors
def download_zip_url(url, save_path, chunk_size=128):
"""Download and save a Zipfile locally.
Useful for acquiring and storing non-PUDL data locally.
Args:
url (str): The URL from which to download the Zipfile
save_path (pathlib.Path): The location to save the file.
chunk_size (int): Data chunk in bytes to use while downloading.
Returns:
None
"""
# This is a temporary hack to avoid being filtered as a bot:
headers = {
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:77.0) Gecko/20100101 Firefox/77.0",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
"Accept-Language": "en-US,en;q=0.5",
"DNT": "1",
"Connection": "keep-alive",
"Upgrade-Insecure-Requests": "1",
}
r = requests.get(url, stream=True, headers=headers)
with save_path.open(mode="wb") as fd:
for chunk in r.iter_content(chunk_size=chunk_size):
fd.write(chunk)
def add_fips_ids(df, state_col="state", county_col="county", vintage=2015):
"""Add State and County FIPS IDs to a dataframe.
To just add State FIPS IDs, make county_col = None.
"""
# force the columns to be the nullable string types so we have a consistent
# null value to filter out before feeding to addfips
df = df.astype({state_col: pd.StringDtype()})
if county_col:
df = df.astype({county_col: pd.StringDtype()})
af = addfips.AddFIPS(vintage=vintage)
# Lookup the state and county FIPS IDs and add them to the dataframe:
df["state_id_fips"] = df.apply(
lambda x: (
af.get_state_fips(state=x[state_col]) if pd.notnull(x[state_col]) else pd.NA
),
axis=1,
)
# force the code columns to be nullable strings - the leading zeros are
# important
df = df.astype({"state_id_fips": pd.StringDtype()})
logger.info(
f"Assigned state FIPS codes for "
f"{len(df[df.state_id_fips.notnull()])/len(df):.2%} of records."
)
if county_col:
df["county_id_fips"] = df.apply(
lambda x: (
af.get_county_fips(state=x[state_col], county=x[county_col])
if pd.notnull(x[county_col]) and pd.notnull(x[state_col])
else pd.NA
),
axis=1,
)
# force the code columns to be nullable strings - the leading zeros are
# important
df = df.astype({"county_id_fips": pd.StringDtype()})
logger.info(
f"Assigned county FIPS codes for "
f"{len(df[df.county_id_fips.notnull()])/len(df):.2%} of records."
)
return df
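# A small sketch of the expected output (illustrative values; relies on the addfips
# package data for the chosen census vintage):
#
#     df = pd.DataFrame({"state": ["CO"], "county": ["Boulder"]})
#     add_fips_ids(df)
#     # -> adds state_id_fips == "08" and county_id_fips == "08013" as nullable strings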
def clean_eia_counties(df, fixes, state_col="state", county_col="county"):
"""Replace non-standard county names with county nmes from US Census."""
df = df.copy()
df[county_col] = (
df[county_col]
.str.strip()
# Condense multiple whitespace chars.
.str.replace(r"\s+", " ", regex=True)
.str.replace(r"^St ", "St. ", regex=True) # Standardize abbreviation.
# Standardize abbreviation.
.str.replace(r"^Ste ", "Ste. ", regex=True)
.str.replace("Kent & New Castle", "Kent, New Castle") # Two counties
# Fix ordering, remove comma
.str.replace("Borough, Kodiak Island", "Kodiak Island Borough")
# Turn comma-separated counties into lists
.str.replace(r",$", "", regex=True)
.str.split(",")
)
# Create new records for each county in a multi-valued record
df = df.explode(county_col)
df[county_col] = df[county_col].str.strip()
# Yellowstone county is in MT, not WY
df.loc[
(df[state_col] == "WY") & (df[county_col] == "Yellowstone"), state_col
] = "MT"
# Replace individual bad county names with identified correct names in fixes:
for fix in fixes.itertuples():
state_mask = df[state_col] == fix.state
county_mask = df[county_col] == fix.eia_county
df.loc[state_mask & county_mask, county_col] = fix.fips_county
return df
def oob_to_nan(df, cols, lb=None, ub=None):
"""Set non-numeric values and those outside of a given rage to NaN.
Args:
df (pandas.DataFrame): The dataframe containing values to be altered.
cols (iterable): Labels of the columns whose values are to be changed.
lb: (number): Lower bound, below which values are set to NaN. If None,
don't use a lower bound.
        ub: (number): Upper bound, above which values are set to NaN. If None,
don't use an upper bound.
Returns:
pandas.DataFrame: The altered DataFrame.
"""
out_df = df.copy()
for col in cols:
# Force column to be numeric if possible, NaN otherwise:
out_df.loc[:, col] = pd.to_numeric(out_df[col], errors="coerce")
if lb is not None:
out_df.loc[out_df[col] < lb, col] = np.nan
if ub is not None:
out_df.loc[out_df[col] > ub, col] = np.nan
return out_df
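# A short sketch of how non-numeric and out-of-bounds values are handled (made-up
# column name and limits):
#
#     df = pd.DataFrame({"heat_content_mmbtu": ["1.2", "garbage", "-5", "9000"]})
#     oob_to_nan(df, cols=["heat_content_mmbtu"], lb=0, ub=100)
#     # -> 1.2 survives; the non-numeric, negative, and >100 values all become NaN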
def prep_dir(dir_path, clobber=False):
"""Create (or delete and recreate) a directory.
Args:
dir_path (path-like): path to the directory that you are trying to
clean and prepare.
clobber (bool): If True and dir_path exists, it will be removed and
replaced with a new, empty directory.
Raises:
FileExistsError: if a file or directory already exists at dir_path.
Returns:
pathlib.Path: Path to the created directory.
"""
dir_path = pathlib.Path(dir_path)
if dir_path.exists():
if clobber:
shutil.rmtree(dir_path)
else:
raise FileExistsError(f"{dir_path} exists and clobber is {clobber}")
dir_path.mkdir(parents=True)
return dir_path
def is_doi(doi):
"""Determine if a string is a valid digital object identifier (DOI).
Function simply checks whether the offered string matches a regular
expresssion -- it doesn't check whether the DOI is actually registered
with the relevant authority.
Args:
doi (str): String to validate.
Returns:
bool: True if doi matches the regex for valid DOIs, False otherwise.
"""
doi_regex = re.compile(
r"(doi:\s*|(?:https?://)?(?:dx\.)?doi\.org/)?(10\.\d+(.\d+)*/.+)$",
re.IGNORECASE | re.UNICODE,
)
return bool(re.match(doi_regex, doi))
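# Illustrative checks (example strings only, not references to real archives):
#
#     is_doi("10.5281/zenodo.123456")                  # -> True
#     is_doi("https://doi.org/10.5281/zenodo.123456")  # -> True
#     is_doi("not-a-doi")                              # -> False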
def clean_merge_asof(
left: pd.DataFrame,
right: pd.DataFrame,
left_on: str = "report_date",
right_on: str = "report_date",
by: List[str] = [],
) -> pd.DataFrame:
"""Merge two dataframes having different ``report_date`` frequencies.
We often need to bring together data which is reported on a monthly basis, and
entity attributes that are reported on an annual basis. The
:func:`pandas.merge_asof` is designed to do this, but requires that dataframes are
sorted by the merge keys (``left_on``, ``right_on``, and ``by`` here). We also need
to make sure that all merge keys have identical data types in the two dataframes
(e.g. ``plant_id_eia`` needs to be a nullable integer in both dataframes, not a
python int in one, and a nullable :func:`pandas.Int64Dtype` in the other). Note
that :func:`pandas.merge_asof` performs a left merge, so the higher frequency
dataframe **must** be the left dataframe.
We also force both ``left_on`` and ``right_on`` to be a Datetime using
:func:`pandas.to_datetime` to allow merging dataframes having integer years with
those having datetime columns.
Because :func:`pandas.merge_asof` searches backwards for the first matching date,
this function only works if the less granular dataframe uses the convention of
reporting the first date in the time period for which it reports. E.g. annual
dataframes need to have January 1st as the date. This is what happens by defualt if
only a year or year-month are provided to :func:`pandas.to_datetime` as strings.
Args:
left: The higher frequency "data" dataframe. Typically monthly in our use
cases. E.g. ``generation_eia923``. Must contain ``report_date`` and any
columns specified in the ``by`` argument.
right: The lower frequency "attribute" dataframe. Typically annual in our uses
cases. E.g. ``generators_eia860``. Must contain ``report_date`` and any
columns specified in the ``by`` argument.
left_on: Column in ``left`` to merge on using merge_asof. Default is
``report_date``. Must be convertible to a Datetime using
:func:`pandas.to_datetime`
right_on: Column in ``right`` to merge on using :func:`pd.merge_asof`. Default
is ``report_date``. Must be convertible to a Datetime using
:func:`pandas.to_datetime`
by: Columns to merge on in addition to ``report_date``. Typically ID columns
like ``plant_id_eia``, ``generator_id`` or ``boiler_id``.
Returns:
Merged contents of left and right input dataframes. Will be sorted by
``left_on`` and any columns specified in ``by``. See documentation for
:func:`pandas.merge_asof` to understand how this kind of merge works.
Raises:
ValueError: if ``left_on`` or ``right_on`` columns are missing from their
respective input dataframes.
ValueError: if any of the labels referenced in ``by`` are missing from either
the left or right dataframes.
"""
# Make sure we've got all the required inputs...
if left_on not in left.columns:
raise ValueError(f"Left dataframe has no column {left_on}.")
if right_on not in right.columns:
raise ValueError(f"Right dataframe has no {right_on}.")
missing_left_cols = [col for col in by if col not in left.columns]
if missing_left_cols:
raise ValueError(f"Left dataframe is missing {missing_left_cols}.")
missing_right_cols = [col for col in by if col not in right.columns]
if missing_right_cols:
raise ValueError(f"Left dataframe is missing {missing_right_cols}.")
def cleanup(df, on, by):
df = apply_pudl_dtypes(df)
df.loc[:, on] = pd.to_datetime(df[on])
df = df.sort_values([on] + by)
return df
return pd.merge_asof(
cleanup(df=left, on=left_on, by=by),
cleanup(df=right, on=right_on, by=by),
left_on=left_on,
right_on=right_on,
by=by,
tolerance=pd.Timedelta("365 days"), # Should never match across years.
)
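# A minimal sketch of the monthly-vs-annual merge (made-up plant records; assumes the
# columns used here are standard PUDL fields known to apply_pudl_dtypes):
#
#     gens = pd.DataFrame({
#         "report_date": ["2020-01-01", "2020-02-01"],
#         "plant_id_eia": [1, 1],
#         "net_generation_mwh": [10.0, 12.0],
#     })
#     attrs = pd.DataFrame({
#         "report_date": ["2020-01-01"],
#         "plant_id_eia": [1],
#         "capacity_mw": [50.0],
#     })
#     clean_merge_asof(gens, attrs, by=["plant_id_eia"])
#     # -> both monthly rows pick up the annual capacity_mw value of 50.0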
def organize_cols(df, cols):
"""Organize columns into key ID & name fields & alphabetical data columns.
For readability, it's nice to group a few key columns at the beginning
of the dataframe (e.g. report_year or report_date, plant_id...) and then
put all the rest of the data columns in alphabetical order.
Args:
df: The DataFrame to be re-organized.
cols: The columns to put first, in their desired output ordering.
Returns:
pandas.DataFrame: A dataframe with the same columns as the input
DataFrame df, but with cols first, in the same order as they
were passed in, and the remaining columns sorted alphabetically.
"""
# Generate a list of all the columns in the dataframe that are not
# included in cols
data_cols = sorted([c for c in df.columns.tolist() if c not in cols])
organized_cols = cols + data_cols
return df[organized_cols]
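# For example (made-up column names):
#
#     df = pd.DataFrame(columns=["net_generation_mwh", "plant_id_eia", "fuel_cost", "report_date"])
#     organize_cols(df, ["report_date", "plant_id_eia"]).columns.tolist()
#     # -> ["report_date", "plant_id_eia", "fuel_cost", "net_generation_mwh"]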
def simplify_strings(df, columns):
"""Simplify the strings contained in a set of dataframe columns.
Performs several operations to simplify strings for comparison and parsing purposes.
These include removing Unicode control characters, stripping leading and trailing
whitespace, using lowercase characters, and compacting all internal whitespace to a
single space.
Leaves null values unaltered. Casts other values with astype(str).
Args:
df (pandas.DataFrame): DataFrame whose columns are being cleaned up.
columns (iterable): The labels of the string columns to be simplified.
Returns:
pandas.DataFrame: The whole DataFrame that was passed in, with
the string columns cleaned up.
"""
out_df = df.copy()
for col in columns:
if col in out_df.columns:
out_df.loc[out_df[col].notnull(), col] = (
out_df.loc[out_df[col].notnull(), col]
.astype(str)
.str.replace(r"[\x00-\x1f\x7f-\x9f]", "", regex=True)
.str.strip()
.str.lower()
.str.replace(r"\s+", " ", regex=True)
)
return out_df
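# A short illustration (made-up plant names):
#
#     df = pd.DataFrame({"plant_name": ["  Big   Plant \x1f", None]})
#     simplify_strings(df, columns=["plant_name"])
#     # -> ["big plant", None]; null values are left untouched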
def cleanstrings_series(col, str_map, unmapped=None, simplify=True):
"""Clean up the strings in a single column/Series.
Args:
col (pandas.Series): A pandas Series, typically a single column of a
dataframe, containing the freeform strings that are to be cleaned.
str_map (dict): A dictionary of lists of strings, in which the keys are
            the simplified canonical strings, with which each string found in
the corresponding list will be replaced.
unmapped (str): A value with which to replace any string found in col
that is not found in one of the lists of strings in map. Typically
the null string ''. If None, these strings will not be replaced.
simplify (bool): If True, strip and compact whitespace, and lowercase
all strings in both the list of values to be replaced, and the
values found in col. This can reduce the number of strings that
need to be kept track of.
Returns:
pandas.Series: The cleaned up Series / column, suitable for
replacing the original messy column in a :class:`pandas.DataFrame`.
"""
if simplify:
col = (
col.astype(str).str.strip().str.lower().str.replace(r"\s+", " ", regex=True)
)
for k in str_map:
str_map[k] = [re.sub(r"\s+", " ", s.lower().strip()) for s in str_map[k]]
for k in str_map:
if str_map[k]:
col = col.replace(str_map[k], k)
if unmapped is not None:
badstrings = np.setdiff1d(col.unique(), list(str_map.keys()))
# This call to replace can only work if there are actually some
# leftover strings to fix -- otherwise it runs forever because we
# are replacing nothing with nothing.
if len(badstrings) > 0:
col = col.replace(badstrings, unmapped)
return col
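# A minimal sketch (hypothetical fuel strings and categories):
#
#     col = pd.Series(["  Natural   Gas ", "NG", "weird stuff"])
#     str_map = {"gas": ["natural gas", "ng"]}
#     cleanstrings_series(col, str_map, unmapped="")
#     # -> ["gas", "gas", ""]; anything not covered by str_map collapses to unmapped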
def cleanstrings(df, columns, stringmaps, unmapped=None, simplify=True):
"""Consolidate freeform strings in several dataframe columns.
This function will consolidate freeform strings found in `columns` into
simplified categories, as defined by `stringmaps`. This is useful when
a field contains many different strings that are really meant to represent
a finite number of categories, e.g. a type of fuel. It can also be used to
create simplified categories that apply to similar attributes that are
reported in various data sources from different agencies that use their own
taxonomies.
The function takes and returns a pandas.DataFrame, making it suitable for
use with the :func:`pandas.DataFrame.pipe` method in a chain.
Args:
df (pandas.DataFrame): the DataFrame containing the string columns to
be cleaned up.
columns (list): a list of string column labels found in the column
index of df. These are the columns that will be cleaned.
stringmaps (list): a list of dictionaries. The keys of these
dictionaries are strings, and the values are lists of strings. Each
dictionary in the list corresponds to a column in columns. The
keys of the dictionaries are the values with which every string in
the list of values will be replaced.
unmapped (str, None): the value with which strings not found in the
stringmap dictionary will be replaced. Typically the null string
''. If None, then strings found in the columns but not in the
stringmap will be left unchanged.
simplify (bool): If true, strip whitespace, remove duplicate
whitespace, and force lower-case on both the string map and the
values found in the columns to be cleaned. This can reduce the
overall number of string values that need to be tracked.
Returns:
pandas.DataFrame: The function returns a new DataFrame containing the
cleaned strings.
"""
out_df = df.copy()
for col, str_map in zip(columns, stringmaps):
out_df[col] = cleanstrings_series(
out_df[col], str_map, unmapped=unmapped, simplify=simplify
)
return out_df
def fix_int_na(df, columns, float_na=np.nan, int_na=-1, str_na=""):
"""Convert NA containing integer columns from float to string.
Numpy doesn't have a real NA value for integers. When pandas stores integer
data which has NA values, it thus upcasts integers to floating point
values, using np.nan values for NA. However, in order to dump some of our
dataframes to CSV files for use in data packages, we need to write out
integer formatted numbers, with empty strings as the NA value. This
function replaces np.nan values with a sentinel value, converts the column
to integers, and then to strings, finally replacing the sentinel value with
the desired NA string.
This is an interim solution -- now that pandas extension arrays have been
implemented, we need to go back through and convert all of these integer
columns that contain NA values to Nullable Integer types like Int64.
Args:
df (pandas.DataFrame): The dataframe to be fixed. This argument allows
method chaining with the pipe() method.
columns (iterable of strings): A list of DataFrame column labels
indicating which columns need to be reformatted for output.
float_na (float): The floating point value to be interpreted as NA and
replaced in col.
int_na (int): Sentinel value to substitute for float_na prior to
conversion of the column to integers.
        str_na (str): String value to substitute for int_na after the column
has been converted to strings.
Returns:
df (pandas.DataFrame): a new DataFrame, with the selected columns
converted to strings that look like integers, compatible with
the postgresql COPY FROM command.
"""
return (
df.replace({c: float_na for c in columns}, int_na)
.astype({c: int for c in columns})
.astype({c: str for c in columns})
.replace({c: str(int_na) for c in columns}, str_na)
)
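# Minimal sketch of fix_int_na() behavior on a hypothetical column:
def _example_fix_int_na():
    demo = pd.DataFrame({"plant_id": [1.0, np.nan, 3.0]})
    # np.nan -> sentinel -1 -> "-1" -> "", everything else becomes "1", "3"
    return fix_int_na(demo, columns=["plant_id"])["plant_id"].tolist()  # ['1', '', '3']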
def month_year_to_date(df):
"""Convert all pairs of year/month fields in a dataframe into Date fields.
This function finds all column names within a dataframe that match the
regular expression '_month$' and '_year$', and looks for pairs that have
identical prefixes before the underscore. These fields are assumed to
describe a date, accurate to the month. The two fields are used to
construct a new _date column (having the same prefix) and the month/year
columns are then dropped.
Todo:
This function needs to be combined with convert_to_date, and improved:
* find and use a _day$ column as well
* allow specification of default month & day values, if none are found.
* allow specification of lists of year, month, and day columns to be
          combined, rather than automatically finding all the matching ones.
* Do the Right Thing when invalid or NA values are encountered.
Args:
df (pandas.DataFrame): The DataFrame in which to convert year/months
fields to Date fields.
Returns:
pandas.DataFrame: A DataFrame in which the year/month fields have been
converted into Date fields.
"""
df = df.copy()
month_regex = "_month$"
year_regex = "_year$"
# Columns that match our month or year patterns.
month_cols = list(df.filter(regex=month_regex).columns)
year_cols = list(df.filter(regex=year_regex).columns)
# Base column names that don't include the month or year pattern
months_base = [re.sub(month_regex, "", m) for m in month_cols]
years_base = [re.sub(year_regex, "", y) for y in year_cols]
# We only want to retain columns that have BOTH month and year
# matches -- otherwise there's no point in creating a Date.
date_base = [base for base in months_base if base in years_base]
# For each base column that DOES have both a month and year,
# We need to grab the real column names corresponding to each,
# so we can access the values in the data frame, and use them
# to create a corresponding Date column named [BASE]_date
month_year_date = []
for base in date_base:
base_month_regex = f"^{base}{month_regex}"
month_col = list(df.filter(regex=base_month_regex).columns)
if not len(month_col) == 1:
raise AssertionError()
month_col = month_col[0]
base_year_regex = f"^{base}{year_regex}"
year_col = list(df.filter(regex=base_year_regex).columns)
if not len(year_col) == 1:
raise AssertionError()
year_col = year_col[0]
date_col = f"{base}_date"
month_year_date.append((month_col, year_col, date_col))
for month_col, year_col, date_col in month_year_date:
df = fix_int_na(df, columns=[year_col, month_col])
date_mask = (df[year_col] != "") & (df[month_col] != "")
years = df.loc[date_mask, year_col]
months = df.loc[date_mask, month_col]
df.loc[date_mask, date_col] = pd.to_datetime(
{"year": years, "month": months, "day": 1}, errors="coerce"
)
# Now that we've replaced these fields with a date, we drop them.
df = df.drop([month_col, year_col], axis=1)
return df
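# Illustrative sketch of month_year_to_date() on a hypothetical frame with one
# matching _year/_month pair; the pair is collapsed into a single date column.
def _example_month_year_to_date():
    demo = pd.DataFrame({"report_year": [2020, 2021], "report_month": [1, 6]})
    # Expected: a single "report_date" column holding 2020-01-01 and 2021-06-01.
    return month_year_to_date(demo)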
def fix_leading_zero_gen_ids(df):
"""Remove leading zeros from EIA generator IDs which are numeric strings.
If the DataFrame contains a column named ``generator_id`` then that column
will be cast to a string, and any all numeric value with leading zeroes
will have the leading zeroes removed. This is necessary because in some
but not all years of data, some of the generator IDs are treated as integers
in the Excel spreadsheets published by EIA, so the same generator may show
up with the ID "0001" and "1" in different years.
    Alphanumeric generator IDs with leading zeroes are not affected, as we
found no instances in which an alphanumeric generator ID appeared both with
and without leading zeroes.
Args:
df (pandas.DataFrame): DataFrame, presumably containing a column named
generator_id (otherwise no action will be taken.)
Returns:
pandas.DataFrame
"""
if "generator_id" in df.columns:
fixed_generator_id = (
df["generator_id"]
.astype(str)
.apply(lambda x: re.sub(r"^0+(\d+$)", r"\1", x))
)
num_fixes = len(df.loc[df["generator_id"].astype(str) != fixed_generator_id])
logger.debug("Fixed %s EIA generator IDs with leading zeros.", num_fixes)
df = df.drop("generator_id", axis="columns").assign(
generator_id=fixed_generator_id
)
return df
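# Quick sketch of fix_leading_zero_gen_ids() with made-up generator IDs:
def _example_fix_leading_zero_gen_ids():
    demo = pd.DataFrame({"generator_id": ["0001", "1", "GT01"]})
    # "0001" and "1" collapse to "1"; the alphanumeric "GT01" is left untouched.
    return fix_leading_zero_gen_ids(demo)["generator_id"].tolist()  # ['1', '1', 'GT01']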
def convert_to_date(
df,
date_col="report_date",
year_col="report_year",
month_col="report_month",
day_col="report_day",
month_value=1,
day_value=1,
):
"""Convert specified year, month or day columns into a datetime object.
If the input ``date_col`` already exists in the input dataframe, then no
conversion is applied, and the original dataframe is returned unchanged.
Otherwise the constructed date is placed in that column, and the columns
which were used to create the date are dropped.
Args:
df (pandas.DataFrame): dataframe to convert
date_col (str): the name of the column you want in the output.
year_col (str): the name of the year column in the original table.
month_col (str): the name of the month column in the original table.
        day_col (str): the name of the day column in the original table.
        month_value (int): generated month if no month column exists.
        day_value (int): generated day if no day column exists.
Returns:
pandas.DataFrame: A DataFrame in which the year, month, day columns
values have been converted into datetime objects.
Todo:
Update docstring.
"""
df = df.copy()
if date_col in df.columns:
return df
year = df[year_col]
if month_col not in df.columns:
month = month_value
else:
month = df[month_col]
if day_col not in df.columns:
day = day_value
else:
day = df[day_col]
df[date_col] = pd.to_datetime({"year": year, "month": month, "day": day})
cols_to_drop = [x for x in [day_col, year_col, month_col] if x in df.columns]
df.drop(cols_to_drop, axis="columns", inplace=True)
return df
def fix_eia_na(df):
"""Replace common ill-posed EIA NA spreadsheet values with np.nan.
    Currently replaces the empty string, bare decimal points with no digits,
    and whitespace-only strings with np.nan.
Args:
df (pandas.DataFrame): The DataFrame to clean.
Returns:
pandas.DataFrame: The cleaned DataFrame.
"""
return df.replace(
to_replace=[
r"^\.$", # Nothing but a decimal point
r"^\s*$", # The empty string and entirely whitespace strings
],
value=np.nan,
regex=True,
)
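# Minimal sketch of fix_eia_na() on hypothetical raw EIA values:
def _example_fix_eia_na():
    demo = pd.DataFrame({"val": ["3.14", ".", " ", ""]})
    # Only the bare ".", the whitespace-only string, and the empty string become NaN.
    return fix_eia_na(demo)["val"].tolist()  # ['3.14', nan, nan, nan]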
def simplify_columns(df):
"""Simplify column labels for use as snake_case database fields.
All columns will be re-labeled by:
* Replacing all non-alphanumeric characters with spaces.
* Forcing all letters to be lower case.
* Compacting internal whitespace to a single " ".
* Stripping leading and trailing whitespace.
* Replacing all remaining whitespace with underscores.
Args:
df (pandas.DataFrame): The DataFrame to clean.
Returns:
pandas.DataFrame: The cleaned DataFrame.
Todo:
Update docstring.
"""
df.columns = (
df.columns.str.replace(r"[^0-9a-zA-Z]+", " ", regex=True)
.str.strip()
.str.lower()
.str.replace(r"\s+", " ", regex=True)
.str.replace(" ", "_")
)
return df
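# Illustrative sketch of simplify_columns() with hypothetical labels:
def _example_simplify_columns():
    demo = pd.DataFrame(columns=["Plant Name (EIA)", "  Net Generation-MWh "])
    return simplify_columns(demo).columns.tolist()
    # -> ['plant_name_eia', 'net_generation_mwh']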
def drop_tables(engine, clobber=False):
"""Drops all tables from a SQLite database.
Creates an sa.schema.MetaData object reflecting the structure of the
database that the passed in ``engine`` refers to, and uses that schema to
drop all existing tables.
Todo:
Treat DB connection as a context manager (with/as).
Args:
engine (sa.engine.Engine): An SQL Alchemy SQLite database Engine
            pointing at an existing SQLite database whose tables will be dropped.
Returns:
None
"""
md = sa.MetaData()
md.reflect(engine)
insp = sa.inspect(engine)
if len(insp.get_table_names()) > 0 and not clobber:
raise AssertionError(
f"You are attempting to drop your database without setting clobber to {clobber}"
)
md.drop_all(engine)
conn = engine.connect()
conn.exec_driver_sql("VACUUM")
conn.close()
def merge_dicts(list_of_dicts):
"""Merge multipe dictionaries together.
Given any number of dicts, shallow copy and merge into a new dict,
precedence goes to key value pairs in latter dicts.
Args:
        list_of_dicts (list): a list of dictionaries.
Returns:
dict
"""
merge_dict = {}
for dictionary in list_of_dicts:
merge_dict.update(dictionary)
return merge_dict
def convert_cols_dtypes(
df: pd.DataFrame, data_source: Optional[str] = None, name: Optional[str] = None
) -> pd.DataFrame:
"""Convert a PUDL dataframe's columns to the correct data type.
Boolean type conversions created a special problem, because null values in
boolean columns get converted to True (which is bonkers!)... we generally
want to preserve the null values and definitely don't want them to be True,
    so we are keeping those columns as objects and performing a simple mask for
the boolean columns.
The other exception in here is with the `utility_id_eia` column. It is
often an object column of strings. All of the strings are numbers, so it
should be possible to convert to :func:`pandas.Int32Dtype` directly, but it
    is requiring us to convert to float first. There will probably be other
columns that have this problem... and hopefully pandas just enables this
direct conversion.
Args:
df: dataframe with columns that appear in the PUDL tables.
data_source: the name of the datasource (eia, ferc1, etc.)
name: name of the table (for logging only!)
Returns:
Input dataframe, but with column types as specified by
:py:const:`pudl.metadata.fields.FIELD_METADATA`
"""
# get me all of the columns for the table in the constants dtype dict
dtypes = {
col: dtype
for col, dtype in get_pudl_dtypes(group=data_source).items()
if col in df.columns
}
# grab only the boolean columns (we only need their names)
bool_cols = [col for col in dtypes if dtypes[col] == "boolean"]
# grab all of the non boolean columns
non_bool_cols = {col: dtypes[col] for col in dtypes if col not in bool_cols}
# Grab only the string columns...
string_cols = [col for col in dtypes if dtypes[col] == "string"]
for col in bool_cols:
        # Because the original boolean values sometimes arrive as real Python
        # bools and sometimes as strings, we map both representations here.
        # Values that aren't in the map (including NaN) come back as NaN, and
        # the string "nan" is mapped explicitly to pd.NA because it shows up
        # in the raw data.
df[col] = df[col].map(
{
"False": False,
"True": True,
False: False,
True: True,
"nan": pd.NA,
}
)
if name:
logger.debug(f"Converting the dtypes of: {name}")
# unfortunately, the pd.Int32Dtype() doesn't allow a conversion from object
# columns to this nullable int type column. `utility_id_eia` shows up as a
    # column of strings (!) of numbers so it is an object column, and therefore
# needs to be converted beforehand.
if "utility_id_eia" in df.columns:
# we want to be able to use this dtype cleaning at many stages, and
        # sometimes this column has already been converted to a float, in which
        # case we need to skip this conversion
if df.utility_id_eia.dtypes is np.dtype("object"):
df = df.astype({"utility_id_eia": "float"})
df = (
df.astype(non_bool_cols)
.astype({col: "boolean" for col in bool_cols})
.replace(to_replace="nan", value={col: pd.NA for col in string_cols})
.replace(to_replace="<NA>", value={col: pd.NA for col in string_cols})
)
    # Zip codes are sensitive to datatype. If the datatype gets converted at
    # any point it may mess up the accuracy of the data. For example: 08401.0
    # or 8401 are both incorrect versions of 08401 that a simple datatype
    # conversion cannot fix. For this reason, we use the
    # zero_pad_numeric_string function.
    if any("zip_code" in col for col in df.columns):
zip_cols = [col for col in df.columns if "zip_code" in col]
for col in zip_cols:
if "4" in col:
df.loc[:, col] = zero_pad_numeric_string(df[col], n_digits=4)
else:
df.loc[:, col] = zero_pad_numeric_string(df[col], n_digits=5)
return df
def generate_rolling_avg(df, group_cols, data_col, window, **kwargs):
"""Generate a rolling average.
For a given dataframe with a ``report_date`` column, generate a monthly
rolling average and use this rolling average to impute missing values.
Args:
df (pandas.DataFrame): Original dataframe. Must have group_cols
column, a data_col column and a ``report_date`` column.
group_cols (iterable): a list of columns to groupby.
data_col (str): the name of the data column.
window (int): window from :func:`pandas.Series.rolling`.
kwargs : Additional arguments to pass to
:func:`pandas.Series.rolling`.
Returns:
pandas.DataFrame
"""
df = df.astype({"report_date": "datetime64[ns]"})
# create a full date range for this df
date_range = pd.DataFrame(
pd.date_range(
start=min(df["report_date"]),
end=max(df["report_date"]),
freq="MS",
name="report_date",
)
).assign(
tmp=1
    )  # assigning a temp column to merge on
groups = (
df[group_cols + ["report_date"]]
.drop_duplicates()
        .assign(tmp=1)  # assigning a temp column to merge on
)
# merge the date range and the groups together
# to get the backbone/complete date range/groups
bones = (
date_range.merge(groups)
.drop("tmp", axis=1) # drop the temp column
.merge(df, on=group_cols + ["report_date"])
.set_index(group_cols + ["report_date"])
.groupby(by=group_cols + ["report_date"])
.mean()
)
# with the aggregated data, get a rolling average
roll = bones.rolling(window=window, center=True, **kwargs).agg({data_col: "mean"})
# return the merged
return bones.merge(
roll, on=group_cols + ["report_date"], suffixes=("", "_rolling")
).reset_index()
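# Hypothetical sketch of generate_rolling_avg(): one plant, four months of a
# made-up fuel_cost column, and a 3-month centered rolling mean appended as
# "fuel_cost_rolling" (min_periods is passed through to pandas' rolling()).
def _example_generate_rolling_avg():
    demo = pd.DataFrame({
        "plant_id": [1, 1, 1, 1],
        "report_date": pd.date_range("2020-01-01", periods=4, freq="MS"),
        "fuel_cost": [10.0, 12.0, np.nan, 14.0],
    })
    return generate_rolling_avg(
        demo, group_cols=["plant_id"], data_col="fuel_cost", window=3, min_periods=1
    )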
def fillna_w_rolling_avg(df_og, group_cols, data_col, window=12, **kwargs):
"""Filling NaNs with a rolling average.
    Impute null values in a dataframe using a rolling monthly average. Note
    that this was designed to work with the PudlTabl object's tables.
Args:
df_og (pandas.DataFrame): Original dataframe. Must have group_cols
column, a data_col column and a 'report_date' column.
group_cols (iterable): a list of columns to groupby.
data_col (str): the name of the data column.
window (int): window from pandas.Series.rolling
kwargs : Additional arguments to pass to
:class:`pandas.Series.rolling`.
Returns:
pandas.DataFrame: dataframe with nulls filled in.
"""
df_og = df_og.astype({"report_date": "datetime64[ns]"})
df_roll = generate_rolling_avg(df_og, group_cols, data_col, window, **kwargs)
df_roll[data_col] = df_roll[data_col].fillna(df_roll[f"{data_col}_rolling"])
df_new = df_og.merge(
df_roll,
how="left",
on=group_cols + ["report_date"],
suffixes=("", "_rollfilled"),
)
df_new[data_col] = df_new[data_col].fillna(df_new[f"{data_col}_rollfilled"])
return df_new.drop(columns=[f"{data_col}_rollfilled", f"{data_col}_rolling"])
def count_records(df, cols, new_count_col_name):
"""Count the number of unique records in group in a dataframe.
Args:
df (panda.DataFrame) : dataframe you would like to groupby and count.
cols (iterable) : list of columns to group and count by.
new_count_col_name (string) : the name that will be assigned to the
column that will contain the count.
Returns:
pandas.DataFrame: dataframe containing only ``cols`` and
``new_count_col_name``.
"""
return (
df.assign(count_me=1)
.groupby(cols, observed=True)
.count_me.count()
.reset_index()
.rename(columns={"count_me": new_count_col_name})
)
def cleanstrings_snake(df, cols):
"""Clean the strings in a columns in a dataframe with snake case.
Args:
df (panda.DataFrame) : original dataframe.
cols (list): list of columns in `df` to apply snake case to.
"""
for col in cols:
df.loc[:, col] = (
df[col]
.astype(str)
.str.strip()
.str.lower()
.str.replace(r"\s+", "_", regex=True)
)
return df
def zero_pad_numeric_string(
col: pd.Series,
n_digits: int,
) -> pd.Series:
"""Clean up fixed-width leading zero padded numeric (e.g. ZIP, FIPS) codes.
Often values like ZIP and FIPS codes are stored as integers, or get
converted to floating point numbers because there are NA values in the
column. Sometimes other non-digit strings are included like Canadian
postal codes mixed in with ZIP codes, or IMP (imported) instead of a
FIPS county code. This function attempts to manage these irregularities
and produce either fixed-width leading zero padded strings of digits
having a specified length (n_digits) or NA.
* Convert the Series to a nullable string.
* Remove any decimal point and all digits following it.
* Remove any non-digit characters.
* Replace any empty strings with NA.
* Replace any strings longer than n_digits with NA.
* Pad remaining digit-only strings to n_digits length.
* Replace (invalid) all-zero codes with NA.
Args:
col: The Series to clean. May be numeric, string, object, etc.
n_digits: the desired length of the output strings.
Returns:
A Series of nullable strings, containing only all-numeric strings
having length n_digits, padded with leading zeroes if necessary.
"""
out_col = (
col.astype("string")
# Remove decimal points and any digits following them.
# This turns floating point strings into integer strings
.replace(r"[\.]+\d*", "", regex=True)
# Remove any whitespace
.replace(r"\s+", "", regex=True)
# Replace anything that's not entirely digits with NA
.replace(r"[^\d]+", pd.NA, regex=True)
# Set any string longer than n_digits to NA
.replace(f"[\\d]{{{n_digits+1},}}", pd.NA, regex=True)
# Pad the numeric string with leading zeroes to n_digits length
.str.zfill(n_digits)
# All-zero ZIP & FIPS codes are invalid.
# Also catches empty strings that were zero padded.
.replace({n_digits * "0": pd.NA})
)
if not out_col.str.match(f"^[\\d]{{{n_digits}}}$").all():
raise ValueError(
f"Failed to generate zero-padded numeric strings of length {n_digits}."
)
return out_col
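# Sketch of zero_pad_numeric_string() on hypothetical ZIP-like values:
def _example_zero_pad_numeric_string():
    demo = pd.Series([8401, "08401.0", "IMP", None])
    # Digit strings are kept and left-padded; non-numeric codes and nulls become <NA>.
    return zero_pad_numeric_string(demo, n_digits=5).tolist()
    # -> ['08401', '08401', <NA>, <NA>]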
def iterate_multivalue_dict(**kwargs):
"""Make dicts from dict with main dict key and one value of main dict."""
single_valued = {
k: v
for k, v in kwargs.items()
if not (isinstance(v, list) or isinstance(v, tuple))
}
# Transform multi-valued {k: vlist} into {k1: [{k1: v1}, {k1: v2}, ...], k2: [...], ...}
multi_valued = {
k: [{k: v} for v in vlist]
for k, vlist in kwargs.items()
if (isinstance(vlist, list) or isinstance(vlist, tuple))
}
for value_assignments in itertools.product(*multi_valued.values()):
result = dict(single_valued)
for k_v in value_assignments:
result.update(k_v)
yield result
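# Sketch of iterate_multivalue_dict() with hypothetical kwargs; list- or
# tuple-valued arguments are expanded into one dict per combination, and the
# key names are kept as-is.
def _example_iterate_multivalue_dict():
    return list(iterate_multivalue_dict(source="eia860", years=[2019, 2020]))
    # -> [{'source': 'eia860', 'years': 2019}, {'source': 'eia860', 'years': 2020}]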
def get_working_eia_dates():
"""Get all working EIA dates as a DatetimeIndex."""
dates = pd.DatetimeIndex([])
for data_source in DataSource.from_field_namespace("eia"):
working_partitions = data_source.working_partitions
if "years" in working_partitions:
dates = dates.append(
pd.to_datetime(working_partitions["years"], format="%Y")
)
if "year_month" in working_partitions:
dates = dates.append(
pd.DatetimeIndex([pd.to_datetime(working_partitions["year_month"])])
)
return dates
def dedupe_on_category(dedup_df, base_cols, category_name, sorter):
"""Deduplicate a df using a sorted category to retain prefered values.
Use a sorted category column to retain your prefered values when a
dataframe is deduplicated.
Args:
dedup_df (pandas.DataFrame): the dataframe with the record
base_cols (list) : list of columns to use when dropping duplicates
category_name (string) : name of categorical column
sorter (list): sorted list of category options
"""
dedup_df.loc[:, category_name] = dedup_df.loc[:, category_name].astype(
pd.CategoricalDtype(categories=sorter, ordered=True)
)
return dedup_df.drop_duplicates(subset=base_cols, keep="first")
def calc_capacity_factor(df, freq, min_cap_fact=None, max_cap_fact=None):
"""Calculate capacity factor.
    Capacity factor is calculated from the capacity, the net generation over a
time period and the hours in that same time period. The dates from that
dataframe are pulled out to determine the hours in each period based on
the frequency. The number of hours is used in calculating the capacity
factor. Then records with capacity factors outside the range specified by
`min_cap_fact` and `max_cap_fact` are dropped.
Args:
df (pandas.DataFrame): table with components of capacity factor (
`report_date`, `net_generation_mwh` and `capacity_mw`)
min_cap_fact (float): Lower bound, below which values are set to NaN.
If None, don't use a lower bound. Default is None.
        max_cap_fact (float): Upper bound, above which values are set to NaN.
If None, don't use an upper bound. Default is None.
freq (str): String describing time frequency at which to aggregate
the reported data, such as 'MS' (month start) or 'AS' (annual
start).
Returns:
pandas.DataFrame: modified version of input `df` with one additional
column (`capacity_factor`).
"""
# get a unique set of dates to generate the number of hours
dates = df["report_date"].drop_duplicates()
dates_to_hours = pd.DataFrame(
data={
"report_date": dates,
"hours": dates.apply(
lambda d: (
| pd.date_range(d, periods=2, freq=freq) | pandas.date_range |
#!/home/centos/anaconda3/bin/python3.6
#-*- coding: utf-8 -*-
import os
os.chdir('/home/ec2-user/utils/inven_data_real/')
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
import time
from datetime import datetime,timedelta
from pandas import read_csv
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn import preprocessing
# model
from keras.models import Sequential
from keras.layers import LSTM, Dense
from keras.callbacks import EarlyStopping
from keras.layers import Dropout, Activation
from keras.models import Sequential
from keras.models import save_model, load_model
## Evaluate Model
from math import sqrt
from numpy import concatenate
from sklearn.metrics import mean_squared_error
# Time Measure
start_time = time.time()
# argument
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--index', dest='index', type=str, help="program id used to select hyperparameters and input data files")
args = parser.parse_args()
params = args.index
#program_id = params
import sys
sys.path.append('/home/centos/anaconda3/lib/python3.6/site-packages')
sys.path.append('/home/centos/anaconda3/lib/python3.6/site-packages/pandas')
sys.path.append('/home/centos/anaconda3/lib/python3.6/site-packages/numpy')
# function
def mean_absolute_percentage_error(y_true, y_pred):
return np.mean(np.abs((y_true - y_pred) / y_true)) * 100
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
n_vars = 1 if type(data) is list else data.shape[1]
    df = pd.DataFrame(data, index=dataset.index)  # note: reuses the module-level dataset index so the supervised frame keeps the original timestamps
cols, names = list(), list()
# input sequence (t-n, ... t-1)
for i in range(n_in, 0, -1):
cols.append(df.shift(i))
names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]
# forecast sequence (t, t+1, ... t+n)
for i in range(0, n_out):
cols.append(df.shift(-i))
if i == 0:
names += [('var%d(t)' % (j+1)) for j in range(n_vars)]
else:
names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]
# put it all together
agg = pd.concat(cols, axis=1)
agg.columns = names
# drop rows with NaN values
if dropnan:
agg.dropna(inplace=True)
return agg
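# For intuition: with a single input feature, n_in=2 and n_out=1, each row of
# the returned frame is [var1(t-2), var1(t-1), var1(t)], i.e. two lagged values
# as inputs and the current value as the label. Because the helper indexes its
# frame with the module-level `dataset`, it should only be called after
# `dataset` has been loaded further down in this script.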
program_id = params
# Import Parameter
paramdata = pd.read_csv('DA_INVEN_PROG_TRG3.csv')
paramselect = paramdata[paramdata['PROGRAM_ID'] == params]
window_size = int(paramselect['WINDOW_SIZE'])
model_neuron = int(paramselect['MODEL_NEURON'])
model_ep = int(paramselect['MODEL_EP'])
model_batchsize = int(paramselect['MODEL_BATCH'])
model_dropout = float(paramselect['MODEL_DROPOUT'])
#model_dropout = 0.4
model_activation = paramselect['MODEL_ACT']
model_loss = paramselect['MODEL_LOSS']
model_optimizer = paramselect['MODEL_OPT']
model_ym = paramselect['YM']
i = window_size
j = 1488  # prediction horizon in hours: lags end 1488 hours before the target, and the test window below spans 1488 hours
# Start Time
start_time = time.time()
# Import Train data
dataset = pd.read_csv(params+'.csv')
dataset['DATE'] = pd.to_datetime(dataset['DATE_TIME'],format="%Y-%m-%d %H:%M")
dataset.sort_values(by=['DATE'], axis=0, ascending=True,inplace=True)
dataset= dataset.set_index('DATE')
dataset = dataset[['INVEN']]
dataset.index.name = 'date'
dataset_inven = dataset[['INVEN']]
# ensure all data is float
values = dataset.values
values = values.astype('float32')
# normalize features
scaler = preprocessing.MinMaxScaler(feature_range=(0, 1))
scaled = scaler.fit_transform(values)
# frame as supervised learning
raw_reframed = series_to_supervised(scaled, i+j, 1)
# WINDOW
var_xx = raw_reframed.loc[:,:'var1(t-'+str(j)+')']
# Label
var_yy = raw_reframed['var1(t)']
# MERGE
raw_reframed = pd.concat([var_xx,var_yy], axis=1)
# print(raw_reframed.head())
reframed = raw_reframed
# print(reframed.head(5))
# DateTime
from datetime import datetime,timedelta
current_time = datetime.utcnow() + timedelta(hours=9)
train_date_str = (current_time-timedelta(hours=2)).strftime("%Y%m%d%H")
#train_date_str = '2019052908'
s_traindate = datetime.strptime(train_date_str, '%Y%m%d%H')
# string to datetime
train = reframed[s_traindate:s_traindate].values
# split into input and outputs
train_X, train_y = train[:, :-1], train[:, -1]
# reshape input to be 3D [samples, timesteps, features]
train_X = train_X.reshape((train_X.shape[0], 1, train_X.shape[1]))
#print(train_X.shape, train_y.shape, test_X.shape, test_y.shape)
# load_model
#model = load_model
model = load_model('/home/ec2-user/utils/MODEL_REAL/'+program_id+'.h5')
model.fit(train_X, train_y, epochs=model_ep, batch_size=model_batchsize, verbose=2, shuffle=False)
early_stop = EarlyStopping(monitor='loss', patience=1, verbose=1)
#history = model.fit(train_X, train_y, epochs=model_ep, batch_size=model_batchsize, verbose=2, shuffle=False)
model.save('/home/ec2-user/utils/MODEL_REAL/'+program_id+'.h5')
#model.save('/home/ec2-user/utils/model_hist/'+program_id+'_'+train_date_str+'.h5')
print("Finished Online Training")
train_time = time.time()-start_time
print("Start! Predict Inventory")
predict_date = (datetime.utcnow() + timedelta(hours=9)).strftime("%Y%m%d%H")
print("Making Testing Data")
#test data
from datetime import datetime,timedelta
current_time = datetime.utcnow() + timedelta(hours=9)
test_date_str = (current_time).strftime("%Y%m%d")
test_date = test_date_str + '00'
s_test_date = datetime.strptime(test_date, '%Y%m%d%H')
test_tdate_str = (current_time+timedelta(hours=1488)).strftime("%Y%m%d")
test_tdate = test_tdate_str + '23'
t_test_date = datetime.strptime(test_tdate,'%Y%m%d%H')
test = reframed.loc[s_test_date:t_test_date].values
# split into input and outputs
test_X, test_y = test[:, :-1], test[:, -1]
# reshape input to be 3D [samples, timesteps, features]
test_X = test_X.reshape((test_X.shape[0], 1, test_X.shape[1]))
#print(train_X.shape, train_y.shape, test_X.shape, test_y.shape)
model = load_model('/home/ec2-user/utils/MODEL_REAL/'+program_id+'.h5')
# make a prediction
yhat = model.predict(test_X)
test_X = test_X.reshape((test_X.shape[0], test_X.shape[2]))
# invert scaling for forecast
inv_yhat = concatenate((yhat, test_X[:, 1:]), axis=1)
inv_yhat = scaler.inverse_transform(inv_yhat)
inv_yhat = inv_yhat[:,0]
dataset_actual = dataset.loc[s_test_date:t_test_date]["INVEN"]
Aarray_actual = np.array(dataset_actual)
Aarray_predicted = np.array(inv_yhat)
# calculate RMSE
rmse = sqrt(mean_squared_error(Aarray_actual, Aarray_predicted))
print('Test RMSE: %.3f' % rmse)
mape = mean_absolute_percentage_error(Aarray_actual,Aarray_predicted)
print('Test MAPE: %.3f' % mape)
MEAN = np.mean(Aarray_predicted)
print('MEAN: %.3f' %MEAN)
STD = np.std(Aarray_predicted)
print('STD: %.3f' %STD)
result = np.stack((Aarray_actual,Aarray_predicted), axis=1)
result_pd = | pd.DataFrame(result, index=dataset_actual.index, columns=['actual', 'predict']) | pandas.DataFrame |
import pandas as pd
import numpy as np
import math
import pickle
from scipy import stats
import scipy.io
from scipy.spatial.distance import pdist
from scipy.linalg import cholesky
from scipy.io import loadmat
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.metrics import classification_report,roc_auc_score,recall_score,precision_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.ensemble import RandomForestClassifier
from sklearn.decomposition import PCA
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer
import SMOTE
import CFS
import metrices_V2 as metrices
import platform
from os import listdir
from os.path import isfile, join
from glob import glob
from pathlib import Path
from os import path
import sys
import os
import copy
import traceback
from pathlib import Path
# Venn diag
from matplotlib_venn import venn2, venn2_circles, venn2_unweighted
from matplotlib_venn import venn3, venn3_circles
from matplotlib import pyplot as plt
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings("ignore")
def load_both_data(project,metric):
understand_path = 'data/understand_files_all/' + project + '_understand.csv'
understand_df = pd.read_csv(understand_path)
understand_df = understand_df.dropna(axis = 1,how='all')
cols_list = understand_df.columns.values.tolist()
for item in ['Kind', 'Name','commit_hash', 'Bugs']:
if item in cols_list:
cols_list.remove(item)
cols_list.insert(0,item)
understand_df = understand_df[cols_list]
cols = understand_df.columns.tolist()
understand_df = understand_df.drop_duplicates(cols[4:len(cols)])
understand_df['Name'] = understand_df.Name.str.rsplit('.',1).str[1]
commit_guru_file_level_path = 'data/commit_guru_file/' + project + '.csv'
commit_guru_file_level_df = pd.read_csv(commit_guru_file_level_path)
commit_guru_file_level_df['commit_hash'] = commit_guru_file_level_df.commit_hash.str.strip('"')
    commit_guru_file_level_df = commit_guru_file_level_df[commit_guru_file_level_df['file_name'].str.contains('.java', regex=False)]
commit_guru_file_level_df['Name'] = commit_guru_file_level_df.file_name.str.rsplit('/',1).str[1].str.split('.').str[0].str.replace('/','.')
commit_guru_file_level_df = commit_guru_file_level_df.drop('file_name',axis = 1)
df = understand_df.merge(commit_guru_file_level_df,how='left',on=['commit_hash','Name'])
cols = df.columns.tolist()
cols.remove('Bugs')
cols.append('Bugs')
df = df[cols]
file_names = df.Name
for item in ['Kind', 'Name','commit_hash']:
if item in cols:
df = df.drop(labels = [item],axis=1)
# df.dropna(inplace=True)
df = df.drop_duplicates()
df.reset_index(drop=True, inplace=True)
y = df.Bugs
X = df.drop('Bugs',axis = 1)
cols = X.columns
scaler = MinMaxScaler()
X = scaler.fit_transform(X)
X = pd.DataFrame(X,columns = cols)
imp_mean = IterativeImputer(random_state=0)
X = imp_mean.fit_transform(X)
X = pd.DataFrame(X,columns = cols)
if metric == 'process':
X = X[['file_la', 'file_ld', 'file_lt', 'file_age', 'file_ddev',
'file_nuc', 'own', 'minor', 'file_ndev', 'file_ncomm', 'file_adev',
'file_nadev', 'file_avg_nddev', 'file_avg_nadev', 'file_avg_ncomm',
'file_ns', 'file_exp', 'file_sexp', 'file_rexp', 'file_nd', 'file_sctr']]
elif metric == 'product':
X = X.drop(['file_la', 'file_ld', 'file_lt', 'file_age', 'file_ddev',
'file_nuc', 'own', 'minor', 'file_ndev', 'file_ncomm', 'file_adev',
'file_nadev', 'file_avg_nddev', 'file_avg_nadev', 'file_avg_ncomm',
'file_ns', 'file_exp', 'file_sexp', 'file_rexp', 'file_nd', 'file_sctr'],axis = 1)
else:
X = X
X['Name'] = file_names
X['Bugs'] = y
return X
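# Hypothetical usage: load_both_data('<project>', 'process') returns a frame of
# MinMax-scaled, imputed process metrics for that project plus the 'Name' and
# 'Bugs' columns; 'product' drops the process metrics and keeps the rest, and
# any other value keeps both metric sets.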
def load_data_release_level(project,metric):
understand_path = 'data/understand_files_all/' + project + '_understand.csv'
understand_df = pd.read_csv(understand_path)
understand_df = understand_df.dropna(axis = 1,how='all')
cols_list = understand_df.columns.values.tolist()
for item in ['Kind', 'Name','commit_hash', 'Bugs']:
if item in cols_list:
cols_list.remove(item)
cols_list.insert(0,item)
understand_df = understand_df[cols_list]
cols = understand_df.columns.tolist()
understand_df = understand_df.drop_duplicates(cols[4:len(cols)])
understand_df['Name'] = understand_df.Name.str.rsplit('.',1).str[1]
commit_guru_file_level_path = 'data/commit_guru_file/' + project + '.csv'
commit_guru_file_level_df = pd.read_csv(commit_guru_file_level_path)
commit_guru_file_level_df['commit_hash'] = commit_guru_file_level_df.commit_hash.str.strip('"')
    commit_guru_file_level_df = commit_guru_file_level_df[commit_guru_file_level_df['file_name'].str.contains('.java', regex=False)]
commit_guru_file_level_df['Name'] = commit_guru_file_level_df.file_name.str.rsplit('/',1).str[1].str.split('.').str[0].str.replace('/','.')
commit_guru_file_level_df = commit_guru_file_level_df.drop('file_name',axis = 1)
release_df = pd.read_pickle('data/release/' + project + '_release.pkl')
release_df = release_df.sort_values('created_at',ascending=False)
release_df = release_df.reset_index(drop=True)
release_df['created_at'] = pd.to_datetime(release_df.created_at)
release_df['created_at'] = release_df.created_at.dt.date
commit_guru_path = 'data/commit_guru/' + project + '.csv'
commit_guru_df = pd.read_csv(commit_guru_path)
cols = understand_df.columns.tolist()
commit_guru_df['created_at'] = pd.to_datetime(commit_guru_df.author_date_unix_timestamp,unit='s')
commit_guru_df['created_at'] = commit_guru_df.created_at.dt.date
commit_guru_df = commit_guru_df[['commit_hash','created_at']]
df = understand_df.merge(commit_guru_file_level_df,how='left',on=['commit_hash','Name'])
df = df.merge(commit_guru_df,how='left',on=['commit_hash'])
cols = df.columns.tolist()
cols.remove('Bugs')
cols.append('Bugs')
df = df[cols]
file_names = df.Name
commit_hash = df.commit_hash
for item in ['Kind', 'Name','commit_hash']:
if item in cols:
df = df.drop(labels = [item],axis=1)
df = df.drop_duplicates()
df.reset_index(drop=True, inplace=True)
created_at = df.created_at
df = df.drop('created_at',axis = 1)
y = df.Bugs
X = df.drop('Bugs',axis = 1)
cols = X.columns
scaler = MinMaxScaler()
X = scaler.fit_transform(X)
X = pd.DataFrame(X,columns = cols)
imp_mean = IterativeImputer(random_state=0)
X = imp_mean.fit_transform(X)
X = pd.DataFrame(X,columns = cols)
X['created_at'] = created_at
if metric == 'process':
X = X[['file_la', 'file_ld', 'file_lt', 'file_age', 'file_ddev',
'file_nuc', 'own', 'minor', 'file_ndev', 'file_ncomm', 'file_adev',
'file_nadev', 'file_avg_nddev', 'file_avg_nadev', 'file_avg_ncomm',
'file_ns', 'file_exp', 'file_sexp', 'file_rexp', 'file_nd', 'file_sctr','created_at']]
elif metric == 'product':
X = X.drop(['file_la', 'file_ld', 'file_lt', 'file_age', 'file_ddev',
'file_nuc', 'own', 'minor', 'file_ndev', 'file_ncomm', 'file_adev',
'file_nadev', 'file_avg_nddev', 'file_avg_nadev', 'file_avg_ncomm',
'file_ns', 'file_exp', 'file_sexp', 'file_rexp', 'file_nd', 'file_sctr'],axis = 1)
else:
X = X
df = X
df['Name'] = file_names
df['Bugs'] = y
accepted_commit_dates = []
all_data = | pd.DataFrame() | pandas.DataFrame |
import warnings
from collections import defaultdict
import h5py
import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline as iu_spline
#from pipeline import PipelineException
class PipelineException(Exception):
    """Minimal stand-in for pipeline.PipelineException (assumed interface) so ts2sec() can still raise when the pipeline package is unavailable."""
import matplotlib
import pandas as pd
matplotlib.use('agg')
import matplotlib.pyplot as plt
import numpy as np
try:
import cv2
except ImportError:
print("Could not find cv2. You won't be able to use the pupil tracker.")
ANALOG_PACKET_LEN = 2000
def read_video_hdf5(hdf_path):
"""
Reads hdf5 file for eye tracking
:param hdf_path: path of the file. Needs a %d where multiple files differ.
:return: dictionary with the data
"""
data = {}
with h5py.File(hdf_path, 'r+', driver='family', memb_size=0) as fid:
data['version'] = fid.attrs['Version']
if float(fid.attrs['Version']) == 2.:
data['ball'] = np.asarray(fid['Wheel']).T
wf = np.asarray(np.asarray(fid['Analog Signals'])).T
data['framenum_ts'] = np.asarray(fid['framenum_ts']).squeeze()
data['trialnum_ts'] = np.asarray(fid['trialnum_ts']).squeeze()
data['eyecam_ts'] = np.asarray(fid['videotimestamps']).squeeze()
data['syncPd'] = wf[:, 0] # flip photo diode
data['scanImage'] = wf[:, 1]
data['ts'] = wf[:, 2]
data['analogPacketLen'] = float(fid.attrs['AS_samples_per_channel'])
elif float(fid.attrs['Version']) == 1.:
data['ball'] = np.asarray(fid['ball']).T
wf = np.asarray(np.asarray(fid['waveform'])).T
data['cam1ts'] = np.asarray(fid['behaviorvideotimestamp']).squeeze()
data['cam2ts'] = np.asarray(fid['eyetrackingvideotimestamp']).squeeze()
data['syncPd'] = wf[:, 2] # flip photo diode
data['scanImage'] = wf[:, 9]
data['ts'] = wf[:, 10]
data['analogPacketLen'] = ANALOG_PACKET_LEN
else:
print('File version not known')
return data
def ts2sec(ts, packet_length=0, samplingrate=1e7):
"""
Convert 10MHz timestamps from Saumil's patching program (ts) to seconds (s)
:param ts: timestamps
:param packet_length: length of timestamped packets
:returns:
timestamps converted to seconds
system time (in seconds) of t=0
bad camera indices from 2^31:2^32 in camera timestamps prior to 4/10/13
"""
ts = ts.astype(float)
# find bad indices in camera timestamps and replace with linear est
bad_idx = ts == 2 ** 31 - 1
if bad_idx.sum() > 10:
raise PipelineException('Bad camera ts...')
x = np.where(~bad_idx)[0]
x_bad = np.where(bad_idx)[0]
f = iu_spline(x, ts[~bad_idx], k=1)
ts[bad_idx] = f(x_bad)
# remove wraparound
wrap_idx = np.where(np.diff(ts) < 0)[0]
while not len(wrap_idx) == 0:
ts[wrap_idx[0] + 1:] += 2 ** 32
wrap_idx = np.where(np.diff(ts) < 0)[0]
s = ts / samplingrate
# Remove offset, and if not monotonically increasing (i.e. for packeted ts), interpolate
if np.any(np.diff(s) <= 0):
# Check to make sure it's packets
diffs = np.where(np.diff(s) > 0)[0]
assert packet_length == diffs[0] + 1
# Interpolate
not_zero = np.hstack((0, diffs + 1))
f = iu_spline(not_zero, s[not_zero], k=1)
s = f(np.arange(len(s)))
return s, bad_idx
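# Minimal sketch of ts2sec() on synthetic, strictly increasing 10 MHz counter
# values (no wraparound and no bad camera timestamps):
def _example_ts2sec():
    ts = np.arange(10, dtype=float) * 1e7
    s, bad_idx = ts2sec(ts)
    return s  # -> [0., 1., ..., 9.] seconds, with bad_idx all False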
class CVROIGrabber:
start = None
end = None
roi = None
def __init__(self, img):
self.img = img
self.exit = False
def grab(self):
print('Contrast (std)', np.std(self.img))
img = np.asarray(self.img / self.img.max(), dtype=float)
cv2.namedWindow('real image')
cv2.setMouseCallback('real image', self, 0)
while not self.exit:
cv2.imshow('real image', img)
if (cv2.waitKey(0) & 0xFF) == ord('q'):
cv2.waitKey(1)
cv2.destroyAllWindows()
break
cv2.waitKey(2)
def __call__(self, event, x, y, flags, params):
img = self.img
if event == cv2.EVENT_LBUTTONDOWN:
print('Start Mouse Position: ' + str(x) + ', ' + str(y))
self.start = np.asarray([x, y])
elif event == cv2.EVENT_LBUTTONUP:
self.end = np.asarray([x, y])
x = np.vstack((self.start, self.end))
tmp = np.hstack((x.min(axis=0), x.max(axis=0)))
roi = np.asarray([[tmp[1], tmp[3]], [tmp[0], tmp[2]]], dtype=int) + 1
print(roi)
crop = img[roi[0, 0]:roi[0, 1], roi[1, 0]:roi[1, 1]]
crop = np.asarray(crop / crop.max(), dtype=float)
self.roi = roi
cv2.imshow('crop', crop)
if (cv2.waitKey(0) & 0xFF) == ord('q'):
cv2.destroyAllWindows()
self.exit = True
class ROIGrabber:
"""
Interactive matplotlib figure to grab an ROI from an image.
Usage:
rg = ROIGrabber(img)
# select roi
print(rg.roi) # get ROI
"""
def __init__(self, img):
plt.switch_backend('GTK3Agg')
self.img = img
self.start = None
self.current = None
self.end = None
self.pressed = False
self.fig, self.ax = plt.subplots(facecolor='w')
self.fig.canvas.mpl_connect('button_press_event', self.on_press)
self.fig.canvas.mpl_connect('button_release_event', self.on_release)
self.fig.canvas.mpl_connect('motion_notify_event', self.on_move)
self.replot()
plt.show(block=True)
def draw_rect(self, fr, to, color='dodgerblue'):
x = np.vstack((fr, to))
fr = x.min(axis=0)
to = x.max(axis=0)
self.ax.plot(fr[0] * np.ones(2), [fr[1], to[1]], color=color, lw=2)
self.ax.plot(to[0] * np.ones(2), [fr[1], to[1]], color=color, lw=2)
self.ax.plot([fr[0], to[0]], fr[1] * np.ones(2), color=color, lw=2)
self.ax.plot([fr[0], to[0]], to[1] * np.ones(2), color=color, lw=2)
self.ax.plot(fr[0], fr[1], 'ok', mfc='gold')
self.ax.plot(to[0], to[1], 'ok', mfc='deeppink')
def replot(self):
self.ax.clear()
self.ax.imshow(self.img, cmap=plt.cm.gray)
if self.pressed:
self.draw_rect(self.start, self.current, color='lime')
elif self.start is not None and self.end is not None:
self.draw_rect(self.start, self.current)
self.ax.axis('tight')
self.ax.set_aspect(1)
self.ax.set_title('Close window when done', fontsize=16, fontweight='bold')
plt.draw()
@property
def roi(self):
x = np.vstack((self.start, self.end))
tmp = np.hstack((x.min(axis=0), x.max(axis=0)))
return np.asarray([[tmp[1], tmp[3]], [tmp[0], tmp[2]]], dtype=int) + 1
def on_press(self, event):
if event.xdata is not None and event.ydata is not None:
self.pressed = True
self.start = np.asarray([event.xdata, event.ydata])
def on_release(self, event):
if event.xdata is not None and event.ydata is not None:
self.end = np.asarray([event.xdata, event.ydata])
else:
self.end = self.current
self.pressed = False
self.replot()
def on_move(self, event):
if event.xdata is not None and event.ydata is not None:
self.current = np.asarray([event.xdata, event.ydata])
if self.pressed:
self.replot()
class PupilTracker:
"""
Parameters:
perc_high : float # upper percentile for bright pixels
perc_low : float # lower percentile for dark pixels
perc_weight : float # threshold will be perc_weight*perc_low + (1- perc_weight)*perc_high
relative_area_threshold : float # enclosing rotating rectangle has to have at least that amount of area
ratio_threshold : float # ratio of major and minor radius cannot be larger than this
error_threshold : float # threshold on the RMSE of the ellipse fit
min_contour_len : int # minimal required contour length (must be at least 5)
margin : float # relative margin the pupil center should not be in
contrast_threshold : float # contrast below that threshold are considered dark
speed_threshold : float # eye center can at most move that fraction of the roi between frames
dr_threshold : float # maximally allow relative change in radius
"""
def __init__(self, param):
self._params = param
self._center = None
self._radius = None
self._last_detection = 1
@staticmethod
def goodness_of_fit(contour, ellipse):
center, size, angle = ellipse
angle *= np.pi / 180
err = 0
        for coord in contour.squeeze().astype(float):
posx = (coord[0] - center[0]) * np.cos(-angle) - (coord[1] - center[1]) * np.sin(-angle)
posy = (coord[0] - center[0]) * np.sin(-angle) + (coord[1] - center[1]) * np.cos(-angle)
err += ((posx / size[0]) ** 2 + (posy / size[1]) ** 2 - 0.25) ** 2
return np.sqrt(err / len(contour))
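    # Note on the 0.25 target above: cv2.fitEllipse returns `size` as the full
    # axis lengths, so a point lying exactly on the ellipse satisfies
    # (posx / size[0])**2 + (posy / size[1])**2 == (1/2)**2 = 0.25 after
    # rotating into the ellipse frame; the returned value is the RMS deviation
    # from that target.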
@staticmethod
def restrict_to_long_axis(contour, ellipse, corridor):
center, size, angle = ellipse
angle *= np.pi / 180
R = np.asarray([[np.cos(-angle), - np.sin(-angle)], [np.sin(-angle), np.cos(-angle)]])
contour = np.dot(contour.squeeze() - center, R.T)
contour = contour[np.abs(contour[:, 0]) < corridor * ellipse[1][1] / 2]
return (np.dot(contour, R) + center).astype(np.int32)
def get_pupil_from_contours(self, contours, small_gray, show_matching=5):
ratio_thres = self._params['ratio_threshold']
area_threshold = self._params['relative_area_threshold']
error_threshold = self._params['error_threshold']
min_contour = self._params['min_contour_len']
margin = self._params['margin']
speed_thres = self._params['speed_threshold']
dr_thres = self._params['dr_threshold']
err = np.inf
best_ellipse = None
best_contour = None
results, cond = defaultdict(list), defaultdict(list)
for j, cnt in enumerate(contours):
if len(contours[j]) < min_contour: # otherwise fitEllipse won't work
continue
ellipse = cv2.fitEllipse(contours[j])
((x, y), axes, angle) = ellipse
if min(axes) == 0: # otherwise ratio won't work
continue
ratio = max(axes) / min(axes)
area = np.prod(ellipse[1]) / np.prod(small_gray.shape)
curr_err = self.goodness_of_fit(cnt, ellipse)
results['ratio'].append(ratio)
results['area'].append(area)
results['rmse'].append(curr_err)
results['x coord'].append(x / small_gray.shape[1])
results['y coord'].append(y / small_gray.shape[0])
center = np.array([x / small_gray.shape[1], y / small_gray.shape[0]])
r = max(axes)
dr = 0 if self._radius is None else np.abs(r - self._radius) / self._radius
dx = 0 if self._center is None else np.sqrt(np.sum((center - self._center) ** 2))
results['dx'].append(dx)
results['dr/r'].append(dr)
matching_conditions = 1 * (ratio <= ratio_thres) + 1 * (area >= area_threshold) \
+ 1 * (curr_err < error_threshold) \
+ 1 * (margin < center[0] < 1 - margin) \
+ 1 * (margin < center[1] < 1 - margin) \
+ 1 * (dx < speed_thres * self._last_detection) \
+ 1 * (dr < dr_thres * self._last_detection)
cond['ratio'].append(ratio <= ratio_thres)
cond['area'].append(area >= area_threshold)
cond['rmse'].append(curr_err < error_threshold)
cond['x coord'].append(margin < center[0] < 1 - margin)
cond['y coord'].append(margin < center[1] < 1 - margin)
cond['dx'].append(dx < speed_thres * self._last_detection)
cond['dr/r'].append(dr < dr_thres * self._last_detection)
results['conditions'] = matching_conditions
cond['conditions'].append(True)
if curr_err < err and matching_conditions == 7:
best_ellipse = ellipse
best_contour = cnt
err = curr_err
cv2.ellipse(small_gray, ellipse, (0, 0, 255), 2)
elif matching_conditions >= show_matching:
cv2.ellipse(small_gray, ellipse, (255, 0, 0), 2)
if best_ellipse is None:
df = pd.DataFrame(results)
df2 = | pd.DataFrame(cond) | pandas.DataFrame |
# coding: utf-8
# # Explore simulated relationship (part 2)
#
# This notebook uses simulated data generated from [main_Pa_sim_enhance_AtoB](1_main_Pa_sim_enhance_AtoB.ipynb). That notebook takes raw Pseudomonas gene expression data from the Pseudomonas compendium referenced in the [ADAGE](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5069748/) paper and adds a strong nonlinear signal. This signal assigns a set of genes to group A and a set of genes to group B. If the expression of the genes in group A exceeds some threshold, then the genes in group B are upregulated.
#
# This notebook extends the exploration performed in [explore_relationship_AandB_pt1](explore_relationship_AandB_pt1.ipynb). In that notebook we determined that the modeled/predicted gene expression data between A and B (i.e. after applying a linear transformation in the latent space and decoding) follows a mostly linear relationship. We assume that this means the *decoder* is learning this linear relationship. So now we want to determine what the *encoder* is learning.
# In[1]:
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import os
import pandas as pd
import numpy as np
import random
import glob
import pickle
import seaborn as sns
from keras.models import model_from_json, load_model
from functions import utils
import matplotlib.pyplot as plt
from matplotlib_venn import venn2
from numpy.random import seed
randomState = 123
seed(randomState)
# In[2]:
# Run notebook to generate simulated data
#%run ./main_Pa_sim_enhance_AtoB.ipynb
# In[3]:
# Load
base_dir = os.path.dirname(os.getcwd())
analysis_name = 'sim_balancedAB_100_2latent'
sim_data_file = os.path.join(
base_dir,
"data",
analysis_name,
"train_model_input.txt.xz"
)
A_file = os.path.join(
base_dir,
"data",
analysis_name,
"geneSetA.txt"
)
B_file = os.path.join(
base_dir,
"data",
analysis_name,
"geneSetB.txt"
)
offset_vae_file = os.path.join(
os.path.dirname(os.getcwd()),
"encoded",
analysis_name,
"offset_latent_space_vae.txt"
)
weight_file = os.path.join(
os.path.dirname(os.getcwd()),
"data",
analysis_name,
"VAE_weight_matrix.txt"
)
model_encoder_file = glob.glob(os.path.join(
base_dir,
"models",
analysis_name,
"*_encoder_model.h5"))[0]
weights_encoder_file = glob.glob(
os.path.join(
base_dir,
"models",
analysis_name,
"*_encoder_weights.h5"
)
)[0]
model_decoder_file = glob.glob(
os.path.join(
base_dir,
"models",
analysis_name,
"*_decoder_model.h5"
)
)[0]
weights_decoder_file = glob.glob(
os.path.join(
base_dir,
"models",
analysis_name,
"*_decoder_weights.h5"
)
)[0]
# In[4]:
# Output image files
positive_trend_file = os.path.join(
base_dir,
"viz",
analysis_name,
"input_A_B.png"
)
model_trend_file = os.path.join(
base_dir,
"viz",
analysis_name,
"model_A_transformB.png"
)
# In[5]:
# Read data
sim_data = pd.read_table(sim_data_file, index_col=0, header=0, compression='xz')
geneSetA = pd.read_table(A_file, header=0, index_col=0)
geneSetB = pd.read_table(B_file, header=0, index_col=0)
print(sim_data.shape)
sim_data.head()
# In[6]:
# Select samples that have expression of gene A around the threshold
# Since threshold is 0.5 then select samples with expression in range(0.4, 0.6)
# Since our simulation set all genes in set A to be the same value for a give sample
# we can consider a single gene in set A to query by
rep_gene_A = geneSetA.iloc[0][0]
# Query for samples whose representative gene A expression is in range (0.4, 0.6)
#test_samples = sim_data.query('0.4 < @rep_gene_A < 0.6')  # does not work: @rep_gene_A injects the gene name as a literal string rather than referencing that column, so use boolean indexing instead
test_samples = sim_data[(sim_data[rep_gene_A]>0.4) & (sim_data[rep_gene_A]<0.6)]
test_samples_sorted = test_samples.sort_values(by=[rep_gene_A])
print(test_samples_sorted.shape)
test_samples_sorted.head()
# ## 1. Trend of gene B with respect to A (input)
#
# How is B changing with respect to A in our simulated dataset (before the data goes into the autoencoder)?
#
# Plot gene expression of A vs mean(gene B expression). This plot will serve as a reference against later plots that will show the relationship between A and B after transforming the data (i.e. after the data has been fed through the autoencoder)
# In[7]:
# Get the means of B genes
# Convert dataframe with gene ids to list
geneSetB_ls = geneSetB['gene id'].values.tolist()
geneSetB_exp = test_samples[geneSetB_ls]
# Get the mean for each sample
geneSetB_mean = geneSetB_exp.mean(axis=1)
geneSetB_mean.head()
# In[8]:
# Join original expression of A and mean(transformed expression of B)
original_A_exp = test_samples[rep_gene_A]
original_B_mean_exp = geneSetB_mean
A_and_B_before_df = pd.merge(original_A_exp.to_frame('gene A untransformed'),
original_B_mean_exp.to_frame('mean gene B untransformed'),
left_index=True, right_index=True)
A_and_B_before_df.head()
# **Plot**
# The plot below shows the signal that was added to the dataset. This signal assigned a set of genes to group A and a set of genes to group B. If the expression of genes in group A exceeded some threshold then the genes in group B were upregulated.
#
# So we see a step-function relationship between the expression of genes in group A and the expression of genes in group B. With a threshold of 0.5, the expression of the genes in B is upregulated once the expression of genes in A exceeds that threshold.
# In[9]:
# Plot
positive_signal = sns.regplot(x='gene A untransformed',
y='mean gene B untransformed',
data = A_and_B_before_df)
fig = positive_signal.get_figure()
fig.savefig(positive_trend_file, dpi=300)
# ## 2. Trend of gene B with respect to A (decoder)
#
# How is B changing with respect to A after applying our latent space transformation?
#
# Here we are only changing samples **after** they have been encoded into the latent space, and then we apply our latent space transformation. Therefore, any trend we observe here reflects the relationship that the decoder has learned.
# In[10]:
# Define function to apply latent space transformation and output reconstructed data
def interpolate_in_vae_latent_space_AB(all_data,
sample_data,
model_encoder_file,
model_decoder_file,
weights_encoder_file,
weights_decoder_file,
encoded_dir,
gene_id,
percent_low,
percent_high,
out_dir):
"""
    interpolate_in_vae_latent_space_AB(all_data: dataframe,
sample_data: dataframe,
model_encoder_file: string,
model_decoder_file: string,
weights_encoder_file: string,
weights_decoder_file: string,
encoded_dir: string,
gene_id: string,
percent_low: integer,
percent_high: integer,
out_dir: string):
input:
all_data: Dataframe with gene expression data from all samples
sample_data: Dataframe with gene expression data from subset of samples (around the treshold)
model_encoder_file: file containing the learned vae encoder model
model_decoder_file: file containing the learned vae decoder model
weights_encoder_file: file containing the learned weights associated with the vae encoder model
weights_decoder_file: file containing the learned weights associated with the vae decoder model
encoded_dir: directory to use to output offset vector to
gene_id: gene you are using as the "phenotype" to sort samples by
This gene is referred to as "target_gene" in comments below
percent_low: integer between 0 and 1
percent_high: integer between 0 and 1
out_dir: directory to output predicted gene expression to
computation:
1. Sort samples based on the expression level of the target gene defined by the user
2. Sample_data are encoded into VAE latent space
3. We predict the expression profile of the OTHER genes at a given level of target gene
expression by adding a scale factor of offset vector to the sample
The scale factor depends on the distance along the target gene expression gradient
the sample is. For example the range along the target gene expression is from 0 to 1.
If the sample of interest has a target gene expression of 0.3 then our prediction
for the gene expression of all other genes is equal to the gene expression corresponding
to the target gene expression=0 + 0.3*offset latent vector
3. Prediction is decoded back into gene space
4. This computation is repeated for all samples
output:
1. encoded predicted expression profile per sample
2. predicted expression profile per sample
"""
# Load arguments
offset_file = os.path.join(encoded_dir, "offset_latent_space_vae.txt")
# Output file
predict_file = os.path.join(out_dir, "predicted_gene_exp.txt")
predict_encoded_file = os.path.join(out_dir, "predicted_encoded_gene_exp.txt")
# Read in data
target_gene_data = all_data[gene_id]
offset_encoded = pd.read_table(offset_file, header=0, index_col=0)
# read in saved VAE models
loaded_model = load_model(model_encoder_file)
loaded_decoder_model = load_model(model_decoder_file)
# load weights into models
loaded_model.load_weights(weights_encoder_file)
loaded_decoder_model.load_weights(weights_decoder_file)
# Sort target gene data by expression (lowest --> highest)
target_gene_sorted = target_gene_data.sort_values()
lowest_file = os.path.join(encoded_dir, "lowest_encoded_vae.txt")
low_exp_encoded = pd.read_table(lowest_file, header=0, index_col=0)
# Average gene expression across samples in each extreme group
lowest_mean_encoded = low_exp_encoded.mean(axis=0)
# Format and rename as "baseline"
baseline_encoded = pd.DataFrame(
lowest_mean_encoded, index=offset_encoded.columns).T
# Initialize dataframe for predicted expression of sampled data
predicted_sample_data = pd.DataFrame(columns=sample_data.columns)
predicted_encoded_sample_data = pd.DataFrame()
sample_ids = sample_data.index
for sample_id in sample_ids:
intermediate_target_gene_exp = target_gene_sorted[sample_id]
print('gene A exp is {}'.format(intermediate_target_gene_exp))
alpha = get_scale_factor(
target_gene_sorted, intermediate_target_gene_exp, percent_low, percent_high)
print('scale factor is {}'.format(alpha))
predict = baseline_encoded + alpha * offset_encoded
predict_encoded_df = pd.DataFrame(predict)
predicted_encoded_sample_data = (
predicted_encoded_sample_data
.append(predict_encoded_df, ignore_index=True)
)
# Decode prediction
predict_decoded = loaded_decoder_model.predict_on_batch(predict)
predict_df = pd.DataFrame(
predict_decoded, columns=sample_data.columns)
predicted_sample_data = (
predicted_sample_data
.append(predict_df, ignore_index=True)
)
predicted_sample_data.set_index(sample_data.index, inplace=True)
predicted_encoded_sample_data.set_index(sample_data.index, inplace=True)
# Output estimated gene experession values
predicted_sample_data.to_csv(predict_file, sep='\t')
predicted_encoded_sample_data.to_csv(predict_encoded_file, sep='\t')
def get_scale_factor(target_gene_sorted, expression_profile,
percent_low, percent_high):
"""
get_scale_factor(target_gene_sorted: dataframe,
expression_profile: dataframe,
percent_low: integer,
percent_high: integer,):
input:
target_gene_sorted: dataframe of sorted target gene expression
expression_profile: dataframe of gene expression for selected sample
percent_low: integer between 0 and 1
percent_high: integer between 0 and 1
computation:
Determine how much to scale offset based on distance along the target gene expression gradient
Output:
        scale factor = intermediate gene expression / (average high target gene expression - average low target gene expression)
"""
# Collect the extreme gene expressions
# Get sample IDs with the lowest 5% of reference gene expression
threshold_low = np.percentile(target_gene_sorted, percent_low)
lowest = target_gene_sorted[target_gene_sorted <= threshold_low]
# Get sample IDs with the highest 5% of reference gene expression
threshold_high = np.percentile(target_gene_sorted, percent_high)
highest = target_gene_sorted[target_gene_sorted >= threshold_high]
# Average gene expression across samples in each extreme group
lowest_mean = (lowest.values).mean()
highest_mean = (highest.values).mean()
# Different in extremes
denom = highest_mean - lowest_mean
# scale_factor is the proportion along the gene expression gradient
scale_factor = expression_profile / denom
return scale_factor
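# Worked example with hypothetical numbers: if the bottom 5% of samples average
# 0.1 and the top 5% average 0.9 for the target gene, a sample expressing the
# target gene at 0.3 gets scale_factor = 0.3 / (0.9 - 0.1) = 0.375, i.e. 37.5%
# of the offset vector is added to the low-expression baseline.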
# In[11]:
# Apply function
out_dir = os.path.join(base_dir, "output", analysis_name)
encoded_dir = os.path.join(base_dir, "encoded", analysis_name)
percent_low = 5
percent_high = 95
interpolate_in_vae_latent_space_AB(sim_data,
test_samples_sorted,
model_encoder_file,
model_decoder_file,
weights_encoder_file,
weights_decoder_file,
encoded_dir,
rep_gene_A,
percent_low,
percent_high,
out_dir)
# **Plot**
# Plot transformed gene expression A vs mean transformed expression of genes in set B
#
# Q: What is the relationship between genes in set A and B? As the expression of A varies how does the expression of B vary?
# In[12]:
# Read dataframe with gene expression transformed
predict_file = os.path.join(base_dir, "output", analysis_name, "predicted_gene_exp.txt")
predict_gene_exp = pd.read_table(predict_file, header=0, index_col=0)
print(predict_gene_exp.shape)
predict_gene_exp.head()
# In[13]:
# Get the means of B genes
# Convert dataframe with gene ids to list
geneSetB_ls = geneSetB['gene id'].values.tolist()
geneSetB_exp = predict_gene_exp[geneSetB_ls]
# Get the mean for each sample
geneSetB_mean = geneSetB_exp.mean(axis=1)
geneSetB_mean.head()
# In[14]:
# Join original (untransformed) expression of gene A with the mean transformed expression of genes in set B
predict_A_exp = predict_gene_exp[rep_gene_A]
predict_B_mean_exp = geneSetB_mean
A_and_B_predict_df = pd.merge(original_A_exp.to_frame('gene A untransformed'),
predict_B_mean_exp.to_frame('mean gene B transformed'),
left_index=True, right_index=True)
A_and_B_predict_df.head()
# In[15]:
# Plot
sns.regplot(x='gene A untransformed',
y='mean gene B transformed',
data = A_and_B_predict_df)
# ## 3. Trend of gene B with respect to A (encoder)
#
# How is B changing with respect to A after shifting input expression and then applying our latent space transformation?
#
# Here we only change samples **before** they are encoded into the latent space, and then we apply our latent space transformation. If we compare these trends with those from module #2, which show what the decoder is supposedly learning, then we can conclude what the encoder is learning.
#
# In order to test this we manually shift A genes from being below the activation threshold to being above it and see how the gene expression data is reconstructed
# In[16]:
# Artificially shift gene A expression
# Get single sample
test_sample = test_samples_sorted.index[0]
print(test_sample)
# Sample with original value of gene A
A_exp_sample = test_samples_sorted.loc[test_sample]
A_exp_sample_modified_df = pd.DataFrame()
A_exp_sample_modified_df = A_exp_sample_modified_df.append(A_exp_sample, ignore_index=True)
# Convert dataframe with gene ids to list
geneSetA_ls = geneSetA['gene id'].values.tolist()
# Artificially shift genes in set A
new_A_exp = np.linspace(0.41, 0.60, num=100)
for i in new_A_exp:
test_samples_sorted.loc[test_sample,geneSetA_ls] = i
A_exp_sample = test_samples_sorted.loc[test_sample]
A_exp_sample_modified_df = A_exp_sample_modified_df.append(A_exp_sample, ignore_index=True)
A_exp_sample_modified_df.head()
# In[17]:
# Define function to apply latent space transformation to SHIFTED data and output reconstructed data
def interpolate_in_vae_latent_space_shiftA(all_data,
sample_data,
model_encoder_file,
model_decoder_file,
weights_encoder_file,
weights_decoder_file,
encoded_dir,
gene_id,
percent_low,
percent_high,
out_dir):
"""
interpolate_in_vae_latent_space_shiftA(all_data: dataframe,
sample_data: dataframe,
model_encoder_file: string,
model_decoder_file: string,
weights_encoder_file: string,
weights_decoder_file: string,
encoded_dir: string,
gene_id: string,
percent_low: integer,
percent_high: integer,
out_dir: string):
input:
all_data: Dataframe with gene expression data from all samples
sample_data: Dataframe with gene expression data from subset of samples (around the threshold)
model_encoder_file: file containing the learned vae encoder model
model_decoder_file: file containing the learned vae decoder model
weights_encoder_file: file containing the learned weights associated with the vae encoder model
weights_decoder_file: file containing the learned weights associated with the vae decoder model
encoded_dir: directory to use to output offset vector to
gene_id: gene you are using as the "phenotype" to sort samples by
This gene is referred to as "target_gene" in comments below
percent_low: integer between 0 and 100
percent_high: integer between 0 and 100
out_dir: directory to output predicted gene expression to
computation:
1. Sort samples based on the expression level of the target gene defined by the user
2. Sample_data are encoded into VAE latent space
3. We predict the expression profile of the OTHER genes at a given level of target gene
expression by adding a scale factor of offset vector to the sample
The scale factor depends on the distance along the target gene expression gradient
the sample is. For example the range along the target gene expression is from 0 to 1.
If the sample of interest has a target gene expression of 0.3 then our prediction
for the gene expression of all other genes is equal to the gene expression corresponding
to the target gene expression=0 + 0.3*offset latent vector
4. Prediction is decoded back into gene space
5. This computation is repeated for all samples
output:
1. encoded predicted expression profile per sample
2. predicted expression profile per sample
"""
# Load arguments
offset_file = os.path.join(encoded_dir, "offset_latent_space_vae.txt")
# Output file
predict_file = os.path.join(out_dir, "shifted_predicted_gene_exp.txt")
predict_encoded_file = os.path.join(out_dir, "shifted_predicted_encoded_gene_exp.txt")
# Read in data
target_gene_data = all_data[gene_id]
offset_encoded = pd.read_table(offset_file, header=0, index_col=0)
# read in saved VAE models
loaded_model = load_model(model_encoder_file)
loaded_decoder_model = load_model(model_decoder_file)
# load weights into models
loaded_model.load_weights(weights_encoder_file)
loaded_decoder_model.load_weights(weights_decoder_file)
# Initialize dataframe for predicted expression of sampled data
predicted_sample_data = pd.DataFrame(columns=sample_data.columns)
predicted_encoded_sample_data = pd.DataFrame()
sample_ids = sample_data.index
for sample_id in sample_ids:
sample_exp = sample_data.loc[sample_id].to_frame().T
# Use trained model to encode expression data into SAME latent space
predict = loaded_model.predict_on_batch(sample_exp)
predict_encoded_df = pd.DataFrame(predict)
predicted_encoded_sample_data = (
predicted_encoded_sample_data
.append(predict_encoded_df, ignore_index=True)
)
# Decode prediction
predict_decoded = loaded_decoder_model.predict_on_batch(predict_encoded_df)
predict_df = pd.DataFrame(
predict_decoded, columns=sample_data.columns)
predicted_sample_data = (
predicted_sample_data
.append(predict_df, ignore_index=True)
)
predicted_sample_data.set_index(sample_data.index, inplace=True)
predicted_encoded_sample_data.set_index(sample_data.index, inplace=True)
# Output estimated gene expression values
predicted_sample_data.to_csv(predict_file, sep='\t')
predicted_encoded_sample_data.to_csv(predict_encoded_file, sep='\t')
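# Toy sketch (made-up 3-dimensional latent vectors) of the offset arithmetic described in the
# interpolation docstrings above: prediction = baseline encoding + scale_factor * offset vector.
# Nothing here is read from or written to the real encoded files.
demo_baseline_encoded = np.array([0.10, 0.40, 0.25])  # encoding of the low-expression extreme
demo_offset_encoded = np.array([0.50, -0.20, 0.10])   # high-extreme mean minus low-extreme mean
demo_scale = 0.3                                       # sample sits 30% along the expression gradient
print(demo_baseline_encoded + demo_scale * demo_offset_encoded)  # approximately [0.25 0.34 0.28]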
# In[18]:
# Define function to apply latent space transformation to SHIFTED data and output reconstructed data
def interpolate_in_pca_latent_space_shiftA(all_data,
sample_data,
model_dir,
encoded_dir,
gene_id,
percent_low,
percent_high,
out_dir):
"""
interpolate_in_pca_latent_space_shiftA(all_data: dataframe,
sample_data: dataframe,
model_dir: string,
encoded_dir: string,
gene_id: string,
percent_low: integer,
percent_high: integer,
out_dir: string):
input:
all_data: Dataframe with gene expression data from all samples
sample_data: Dataframe with gene expression data from subset of samples (around the threshold)
model_dir: directory containing the learned pca model
encoded_dir: directory to use to output offset vector to
gene_id: gene you are using as the "phenotype" to sort samples by
This gene is referred to as "target_gene" in comments below
percent_low: integer between 0 and 100
percent_high: integer between 0 and 100
out_dir: directory to output predicted gene expression to
computation:
1. Sort samples based on the expression level of the target gene defined by the user
2. Samples are encoded into PCA latent space
3. We predict the expression profile of the OTHER genes at a given level of target gene
expression by adding a scale factor of offset vector to the sample
The scale factor depends on the distance along the target gene expression gradient
the sample is. For example the range along the target gene expression is from 0 to 1.
If the sample of interest has a target gene expression of 0.3 then our prediction
for the gene expression of all other genes is equal to the gene expression corresponding
to the target gene expression=0 + 0.3*offset latent vector
4. Prediction is decoded back into gene space
5. This computation is repeated for all samples
output:
1. predicted expression profile per sample (intermediate samples x 2 statistical scores --> correlation and pvalue)
2. target gene expression sorted by expression level for reference when plotting
"""
# Load arguments
#offset_file = os.path.join(encoded_dir, "offset_latent_space_vae.txt")
model_file = os.path.join(model_dir, "pca_model.pkl")
# Output file
predict_file = os.path.join(out_dir, "shifted_predicted_gene_exp.txt")
predict_encoded_file = os.path.join(out_dir, "shifted_predicted_encoded_gene_exp.txt")
# Read in data
target_gene_data = all_data[gene_id]
#offset_encoded = pd.read_table(offset_file, header=0, index_col=0)
# load pca model
infile = open(model_file, 'rb')
pca = pickle.load(infile)
infile.close()
# Initialize dataframe for predicted expression of sampled data
predicted_sample_data = pd.DataFrame(columns=sample_data.columns)
predicted_encoded_sample_data = pd.DataFrame()
"""
Algo for test.
Input: None
Output: detail (open_time, open_price, direction, expect_price, dump_price, dump_time)
or None
"""
import json
import pickle
import datetime
import time
import os
import numpy as np
import pandas as pd
import board_push
from util.util import haunter
LOG = haunter()
cache_jqd = "./cache_jqd/"
cache_ticker = "./cache_ticker/"
cache_matrix = "./cache_matrix/"
cache_runtime = "./cache_runtime/"
EARLY = 4
SETUP = 7
INTER = 2
def end_nine(sub_df, ealier = 4, setup = 9, inter = 2):
"""
Bars 0-3 are the look-back window; bars 4-12 form the setup.
Each close from bar 4 onward is compared with the close `ealier` bars before it.
return 1  => nine consecutive up closes (sell / "put down" setup)
return -1 => nine consecutive down closes (buy / "wake up" setup)
otherwise => None
"""
arr = np.array(sub_df["close"])
if len(arr) != ealier+setup: # 4+9
return None
direction = None
for index, item in enumerate(arr):
if index < ealier:
continue
elif index == ealier:
direction = np.sign(item-arr[index-ealier])
elif direction == np.sign(item-arr[index-ealier]):
continue
else:
return None
if direction == None:
raise
return direction
def algo_DeMark_lite(sub_df, ealier = 4, setup = 7, inter = 2):
"""
DeMark Indicators are designed to anticipate turning points in the market.
<NAME> created a strategy called a sequential that finds an overextended price move,
one that is likely to change direction and takes a countertrend position.
To get a buy signal, the following three steps are applied to daily data:
1) Setup. There must be a decline of nine or more consecutive closes that are
lower than the corresponding closes four days earlier. If today’s close is equal to or
greater than the close four days before, the setup must begin again.
2) Intersection. To assure prices are declining in an orderly fashion rather than
plunging, the high of any day on or after the eighth day of the set up must be greater
than the low of any day three or more days earlier.
3) Countdown. Once setup and intersection have been satisfied, we count the number of days
in which we close lower than the close two days ago (doesn’t need to be continuous).
When the countdown reaches 13, we get a buy signal unless one of the following occurs:
a. There is a close that exceeds the highest intraday high that occurred during the setup stage.
b. A sell setup occurs (nine consecutive closes above the corresponding closes four days earlier).
c. Another buy setup occurs before the buy countdown is completed.
Traders should expect that the development of the entire formation takes no less than 21 days,
but more typically 24-39 days.
Luckily there are many systems available which do the counts for us. Bloomberg is one such example.
"""
# The lite version applies only steps 1 & 2; step 3 (countdown) is not applied
arr = np.array(sub_df["close"])
if len(arr) != ealier+setup+inter: # 4+7+2
return None
direction = None
for index, item in enumerate(arr):
if index < ealier:
continue
elif index == ealier:
direction = np.sign(item-arr[index-ealier])
elif direction == 0:
return None
elif direction == np.sign(item-arr[index-ealier]):
continue
else:
return None
## check 8 & 9
if direction == -1: # up
decling = sub_df["low"][sub_df["high"][-2]>sub_df["low"]].count()
if decling < 3:
decling = sub_df["low"][sub_df["high"][-1]>sub_df["low"]].count()
if decling < 3:
return None
elif direction == 1: # down
decling = sub_df["high"][sub_df["low"][-2]>sub_df["high"]].count()
if decling < 3:
decling = sub_df["high"][sub_df["low"][-1]>sub_df["high"]].count()
if decling < 3:
return None
else:
raise
return direction
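# Quick illustrative check of end_nine on synthetic bars (hypothetical numbers, not market data):
# thirteen strictly falling closes mean every close from bar 4 onward is below the close four
# bars earlier, so end_nine returns -1 (the "wake up"/buy setup). algo_DeMark_lite additionally
# expects 4+7+2 bars and applies the step-2 intersection test on the high/low columns.
if __name__ == "__main__":
    _demo_bars = pd.DataFrame({"close": np.arange(13, 0, -1),
                               "high": np.arange(13, 0, -1) + 1,
                               "low": np.arange(13, 0, -1) - 1})
    print(end_nine(_demo_bars))  # -1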
def launch():
action = None
# detail_dict
# detail = { "ticker":None,
# "open_price":None,
# "open_time":None,
# "direction":None,
# "expect_price":None,
# "dump_price":None,
# "dump_time":None,
# }
# detail DataFrame
detail_df = pd.DataFrame(columns=[
"ticker",
"open_time", "open_price",
"direction", "expect_price",
"dump_price", "dump_time"])
# init
ticker_list = ["000001.XSHG"]
ticker = ticker_list[0]
# baseline
baseline = "000001.XSHG"
with open(cache_runtime + baseline, 'rb') as f:
baseline_df = pickle.load(f)
baseline_now = baseline_df.index[-1]
# load data
with open(cache_runtime + ticker, 'rb') as f:
df = pickle.load(f)
# open
# LOG.debug(int(df["close"][2]*100) % 10)
# LOG.info(int(df["close"][2]*100) % 10)
# LOG.warning(int(df["close"][2]*100) % 10)
# Algo
sub_df = df[-(EARLY+SETUP+INTER):]
# action = end_nine(sub_df)
action = algo_DeMark_lite(sub_df)
if action == None:
return None
elif action == -1: # up
detail_dict = {}
detail_dict["ticker"] = ticker
detail_dict["open_price"] = sub_df["close"][-3:].mean() #tmp
detail_dict["open_time"] = baseline_now
detail_dict["direction"] = True
detail_dict["expect_price"] = sub_df["close"][-7:-1].mean() #tmp
detail_dict["dump_price"] = sub_df["close"][-1]-(sub_df["close"][-2]-sub_df["close"][-1])
detail_dict["dump_time"] = baseline_now + datetime.timedelta(minutes=5)
elif action == 1:
detail_dict = {}
detail_dict["ticker"] = ticker
detail_dict["open_price"] = sub_df["close"][-3:].mean() #tmp
detail_dict["open_time"] = baseline_now
detail_dict["direction"] = False
detail_dict["expect_price"] = sub_df["close"][-7:-1].mean() #tmp
detail_dict["dump_price"] = sub_df["close"][-1]-(sub_df["close"][-2]-sub_df["close"][-1])
detail_dict["dump_time"] = baseline_now + datetime.timedelta(minutes=5)
else:
raise
return None
# more series are still missing, so this is the only one for now
detail_ss = pd.Series(detail_dict, name=detail_dict["ticker"])
from typing import Optional, List, overload, Union
from matplotlib.axes import Axes
from mpl_format.axes.axis_utils import new_axes
from networkx import DiGraph, draw_networkx_nodes, \
draw_networkx_edges, all_simple_paths, \
descendants_at_distance, draw_networkx_labels
from pandas import DataFrame
from probability.models.decision_tree.nodes import \
DecisionNode, ChanceNode, AmountNode
from probability.models.utils import distribute_about_center
class DecisionTree(object):
"""
A Probabilistic Decision Tree.
There are 3 types of nodes: DecisionNodes, ChanceNodes and AmountNodes.
* Each DecisionNode represents a Decision that needs to be made,
consisting of any number of different choices.
* Each ChanceNode represents a potential choice of a Decision, each with
a given probability of success.
* Each AmountNode represents either a Cost or a Reward if a choice is
successful.
"""
def __init__(self):
"""
Create a new Probabilistic Decision Tree.
"""
self._graph = DiGraph()
self._root_node: Optional[DecisionNode] = None
self._solved: bool = False
@property
def graph(self) -> DiGraph:
"""
Return the wrapped networkx DiGraph object.
"""
return self._graph
@property
def max_depth(self) -> int:
"""
Return the maximum depth of any DecisionNode in the Tree.
"""
decision_nodes = self.decision_nodes()
if len(decision_nodes):
return max([node.depth for node in decision_nodes])
else:
return 0
def _get_layout(self) -> dict:
"""
Generate a layout for the Tree.
"""
x_add = {
DecisionNode: 0,
ChanceNode: 1 / 3,
AmountNode: 2 / 3
}
y_add = {
DecisionNode: 0,
ChanceNode: 1 / 3,
AmountNode: 2 / 3
}
max_depth = self.max_depth
nodes = {}
for depth in range(1, max_depth + 1):
nodes[(DecisionNode, depth)] = self.decision_nodes(depth)
nodes[(ChanceNode, depth)] = self.chance_nodes(depth)
nodes[(AmountNode, depth)] = self.amount_nodes(depth)
max_width = max(len(value) for value in nodes.values())
layout = {}
for node in self._graph.nodes():
node_type = type(node)
node_list = nodes[(node_type, node.depth)]
x = (node.depth + x_add[node_type]) / (max_depth * 3)
y = distribute_about_center(
index=node_list.index(node),
size=len(node_list),
max_loc=max_width - 1,
max_size=max_width
) + y_add[node_type]
layout[node] = [x, y]
return layout
def decision_nodes(self, depth: Optional[int] = None) -> List[DecisionNode]:
"""
Return a list of all DecisionNodes in the DecisionTree.
"""
nodes = [node for node in self._graph.nodes()
if isinstance(node, DecisionNode)]
if depth is not None:
nodes = [node for node in nodes if node.depth == depth]
return nodes
def decision_node(
self, name: str, depth: Optional[int] = None
) -> DecisionNode:
decision_nodes = self.decision_nodes(depth=depth)
return [node for node in decision_nodes
if node.name == name][0]
def chance_nodes(self, depth: Optional[int] = None) -> List[ChanceNode]:
"""
Return a list of all ChanceNodes in the DecisionTree.
"""
nodes = [node for node in self._graph.nodes()
if isinstance(node, ChanceNode)]
if depth is not None:
nodes = [node for node in nodes if node.depth == depth]
return nodes
def chance_node(
self, name: str,
depth: Optional[int] = None
) -> ChanceNode:
chance_nodes = self.chance_nodes(depth=depth)
return [node for node in chance_nodes
if node.name == name][0]
def amount_nodes(self, depth: Optional[int] = None) -> List[AmountNode]:
"""
Return a list of all AmountNodes in the DecisionTree.
"""
nodes = [node for node in self._graph.nodes()
if isinstance(node, AmountNode)]
if depth is not None:
nodes = [node for node in nodes if node.depth == depth]
return nodes
def amount_node(
self, name: str,
depth: Optional[int] = None
) -> AmountNode:
amount_nodes = self.amount_nodes(depth=depth)
return [node for node in amount_nodes
if node.name == name][0]
def node(self, name: str, depth: Optional[int] = None):
"""
Return the node with the given name.
:param name: The name of the node.
:param depth: Optional depth filter in case node names are only unique
by depth.
"""
nodes = list([node for node in self._graph.nodes()
if node.name == name])
if depth is not None:
nodes = [node for node in nodes if node.depth == depth]
if len(nodes) == 1:
return nodes[0]
else:
if depth is None:
raise ValueError(f'{len(nodes)} matching nodes named {name}')
else:
raise ValueError(
f'{len(nodes)} matching nodes named {name} at depth {depth}'
)
def node_amounts_dict(self) -> dict:
"""
Return a dict mapping Nodes to their amounts.
"""
return {
node: node.str_amount
for node in self._graph.nodes()
}
def node_names_dict(self) -> dict:
"""
Return a dict mapping Nodes to their names.
"""
return {
node: node.name
for node in self._graph.nodes()
}
def node_names(self) -> List[str]:
"""
Return a list of all the Node names in the Tree.
"""
return [node.name for node in self._graph.nodes()]
@overload
def parent(self, node: AmountNode) -> ChanceNode:
pass
@overload
def parent(self, node: ChanceNode) -> DecisionNode:
pass
@overload
def parent(self, node: DecisionNode) -> Optional[ChanceNode]:
pass
def parent(self, node):
return list(self._graph.predecessors(node))[0]
@overload
def children(self, node: DecisionNode) -> List[ChanceNode]:
pass
@overload
def children(
self, node: ChanceNode
) -> List[Union[DecisionNode, AmountNode]]:
pass
def children(self, node):
return list(descendants_at_distance(self._graph, node, 1))
def add_decision_node(
self, decision_node: DecisionNode,
parent: Optional[ChanceNode] = None
) -> DecisionNode:
"""
Add a new DecisionNode to the Tree.
:param decision_node: The DecisionNode to add.
:param parent: The parent ChanceNode that triggers the DecisionNode
on failure. Leave as None if this is the first Decision.
"""
if parent is None and self._root_node is not None:
raise ValueError('Must give parent if tree already has a root node')
if decision_node.depth is None:
raise ValueError('DecisionNode must have depth assigned.')
if parent is not None and parent not in self._graph.nodes():
raise ValueError(f'ChanceNode {parent} is not in the Tree.')
self._graph.add_node(decision_node)
if parent is not None:
self._graph.add_edge(parent, decision_node)
else:
self._root_node = decision_node
self._solved = False
return decision_node
def add_decision(self, name: str, parent_name: Optional[str] = None):
"""
Add a new Decision to the Tree.
:param name: The name of the Decision.
:param parent_name: The parent ChanceNode that triggers the DecisionNode
on failure. Leave as None if this is the first
Decision.
"""
if name in self.node_names():
raise ValueError(f'{name} already exists in Tree')
parent: Optional[ChanceNode] = (
self.chance_node(parent_name) if parent_name is not None
else None
)
decision_node = DecisionNode(
name=name,
depth=(1 if parent_name is None
else parent.depth + 1)
)
self.add_decision_node(decision_node=decision_node,
parent=parent)
def add_chance_node(
self, chance_node: ChanceNode,
parent: DecisionNode
) -> ChanceNode:
"""
Add a new ChanceNode to the Tree. The ChanceNode represents one potential
choice of its parent Decision, with a given probability of success.
:param chance_node: The ChanceNode to add.
:param parent: The DecisionNode that this ChanceNode belongs to.
"""
if chance_node.depth is None:
raise ValueError('ChanceNode must have depth assigned.')
if parent not in self._graph.nodes():
raise ValueError(f'DecisionNode {parent} is not in the Tree.')
self._graph.add_node(chance_node)
self._graph.add_edge(parent, chance_node)
self._solved = False
return chance_node
def add_option(
self, name: str,
p_success: float,
amount: float,
parent_name: str,
final: bool = False
):
"""
Add a new option to a Decision with associated ChanceNode, success and
optional failure AmountNodes.
:param name: The name of the Chance.
:param p_success: The probability of the Chance succeeding.
:param amount: The cost of choosing the option.
:param parent_name: The parent DecisionNode that the option belongs to.
:param final: Set to True to add a failure as well as a success
AmountNode.
"""
node_names = self.node_names()
if name in node_names:
raise ValueError(f'{name} already exists in Tree')
if parent_name not in node_names:
raise ValueError(
f'DecisionNode named {parent_name} is not in the Tree.'
)
decision_node = self.decision_node(name=parent_name)
chance_node = self.add_chance_node(
chance_node=ChanceNode(
name=name,
p_success=p_success,
amount=amount,
depth=decision_node.depth
),
parent=decision_node
)
self.add_amount_node(
amount_node=AmountNode(
name=f'{name}.success',
probability=p_success,
depth=decision_node.depth
),
parent=chance_node
)
if final:
self.add_amount_node(
amount_node=AmountNode(
name=f'{name}.failure',
probability=1 - p_success,
depth=decision_node.depth
),
parent=chance_node
)
def add_amount_node(
self, amount_node: AmountNode,
parent: ChanceNode
) -> AmountNode:
"""
Add a new AmountNode to the Tree.
:param amount_node: The AmountNode to add.
:param parent: The ChanceNode associated with this AmountNode,
if successful.
"""
if amount_node.depth is None:
raise ValueError('AmountNode must have depth assigned.')
if parent not in self._graph.nodes():
raise ValueError(f'ChanceNode {parent} is not in the Tree.')
self._graph.add_node(amount_node)
self._graph.add_edge(parent, amount_node)
self._solved = False
return amount_node
def solve(self, minimize: bool = True):
"""
Solve the Decision Tree.
:param minimize: True to minimize amounts, i.e. amounts are costs, or
False to maximize amounts, i.e. amounts are rewards.
"""
if minimize:
opt_func = min
else:
opt_func = max
# 1) at each end point of the tree write down the net total cost
# incurred if that end point is reached
for amount_node in self.amount_nodes():
path_to_node = list(all_simple_paths(
self._graph, self._root_node, amount_node
))[0] # path from first decision node to amount node
total_amount = 0
for node in path_to_node:
if isinstance(node, ChanceNode):
total_amount += node.amount
amount_node: AmountNode = path_to_node[-1]
amount_node.total_amount = total_amount
# 2) work backwards computing the expected cost at all nodes and
# choosing action at choice nodes where expected cost is lowest
for depth in range(self.max_depth, 0, -1):
for amount_node in self.amount_nodes(depth):
# propagate expected payoff to parent chance node
parent = self.parent(amount_node)
parent.expected_amount += (
amount_node.probability * amount_node.total_amount
)
for decision_node in self.decision_nodes(depth):
# select minimum cost from child chance nodes
children = self.children(decision_node)
decision_node.expected_amount = opt_func([
child.expected_amount for child in children
])
if depth > 1:
parent = self.parent(decision_node)
parent.expected_amount += (
parent.p_failure * decision_node.expected_amount
)
self._solved = True
def amounts(self, require_success: bool = False) -> DataFrame:
"""
Return the amounts in the solved DecisionTree.
:param require_success: Set to True to only calculate amounts where
success is guaranteed.
"""
if not self._solved:
raise PermissionError(
"Can't calculate amounts for an unsolved tree."
)
results = []
for amount_node in self.amount_nodes():
path_to_node = list(all_simple_paths(
self._graph, self._root_node, amount_node
))[0]
chance_nodes = [c for c in path_to_node
if isinstance(c, ChanceNode)]
if chance_nodes[-1].p_success != 1 and require_success:
continue
result = {}
for c, node in enumerate(chance_nodes):
result[f'choice_{c + 1}'] = str(node.name)
result[f'amount_{c + 1}'] = node.amount
result[f'expected_amount_{c + 1}'] = node.expected_amount
results.append(result)
return DataFrame(results)
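# A minimal usage sketch of the class above. The decision names, probabilities and amounts are
# invented for illustration and are not part of the original module.
if __name__ == '__main__':
    demo_tree = DecisionTree()
    demo_tree.add_decision('fix-strategy')
    # option 1: attempt a cheap repair; if it fails, a follow-up decision is needed
    demo_tree.add_option('repair', p_success=0.7, amount=100, parent_name='fix-strategy')
    demo_tree.add_decision('repair-failed', parent_name='repair')
    demo_tree.add_option('replace-later', p_success=1.0, amount=400,
                         parent_name='repair-failed', final=True)
    # option 2: replace outright
    demo_tree.add_option('replace-now', p_success=1.0, amount=400,
                         parent_name='fix-strategy', final=True)
    demo_tree.solve(minimize=True)  # backward induction, minimizing expected cost
    print(demo_tree.amounts())      # per-path choices, amounts and expected amounts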
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pandas
from pandas.api.types import is_scalar
from pandas.compat import to_str, string_types, numpy as numpy_compat, cPickle as pkl
import pandas.core.common as com
from pandas.core.dtypes.common import (
_get_dtype_from_object,
is_list_like,
is_numeric_dtype,
is_datetime_or_timedelta_dtype,
is_dtype_equal,
is_object_dtype,
is_integer_dtype,
)
from pandas.core.index import _ensure_index_from_sequences
from pandas.core.indexing import check_bool_indexer, convert_to_index_sliceable
from pandas.util._validators import validate_bool_kwarg
import itertools
import functools
import numpy as np
import re
import sys
import warnings
from modin.error_message import ErrorMessage
from .utils import from_pandas, to_pandas, _inherit_docstrings
from .iterator import PartitionIterator
from .series import SeriesView
@_inherit_docstrings(
pandas.DataFrame, excluded=[pandas.DataFrame, pandas.DataFrame.__init__]
)
class DataFrame(object):
def __init__(
self,
data=None,
index=None,
columns=None,
dtype=None,
copy=False,
query_compiler=None,
):
"""Distributed DataFrame object backed by Pandas dataframes.
Args:
data (numpy ndarray (structured or homogeneous) or dict):
Dict can contain Series, arrays, constants, or list-like
objects.
index (pandas.Index, list, ObjectID): The row index for this
DataFrame.
columns (pandas.Index): The column names for this DataFrame, in
pandas Index object.
dtype: Data type to force. Only a single dtype is allowed.
If None, infer
copy (boolean): Copy data from inputs.
Only affects DataFrame / 2d ndarray input.
query_compiler: A query compiler object to manage distributed computation.
"""
if isinstance(data, DataFrame):
self._query_compiler = data._query_compiler
return
# Check type of data and use appropriate constructor
if data is not None or query_compiler is None:
pandas_df = pandas.DataFrame(
data=data, index=index, columns=columns, dtype=dtype, copy=copy
)
self._query_compiler = from_pandas(pandas_df)._query_compiler
else:
self._query_compiler = query_compiler
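# Typical end-user construction goes through the public module rather than this class
# directly; a hypothetical sketch (assuming the standard `modin.pandas` entry point):
#
#     import modin.pandas as pd
#     df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
#     df.add_prefix("col_")   # same API as pandas, executed on the distributed backend
#
# Internally both routes end up here, either via `data` (converted with from_pandas) or
# via an already-built `query_compiler`.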
def __str__(self):
return repr(self)
def _build_repr_df(self, num_rows, num_cols):
# Add one here so that pandas automatically adds the dots
# It turns out to be faster to extract 2 extra rows and columns than to
# build the dots ourselves.
num_rows_for_head = num_rows // 2 + 1
num_cols_for_front = num_cols // 2 + 1
if len(self.index) <= num_rows:
head = self._query_compiler
tail = None
else:
head = self._query_compiler.head(num_rows_for_head)
tail = self._query_compiler.tail(num_rows_for_head)
if len(self.columns) <= num_cols:
head_front = head.to_pandas()
# Creating these empty to make the concat logic simpler
head_back = pandas.DataFrame()
tail_back = pandas.DataFrame()
if tail is not None:
tail_front = tail.to_pandas()
else:
tail_front = pandas.DataFrame()
else:
head_front = head.front(num_cols_for_front).to_pandas()
head_back = head.back(num_cols_for_front).to_pandas()
if tail is not None:
tail_front = tail.front(num_cols_for_front).to_pandas()
tail_back = tail.back(num_cols_for_front).to_pandas()
else:
tail_front = tail_back = pandas.DataFrame()
head_for_repr = pandas.concat([head_front, head_back], axis=1)
tail_for_repr = pandas.concat([tail_front, tail_back], axis=1)
return pandas.concat([head_for_repr, tail_for_repr])
def __repr__(self):
# In the future, we can have this be configurable, just like Pandas.
num_rows = 60
num_cols = 30
result = repr(self._build_repr_df(num_rows, num_cols))
if len(self.index) > num_rows or len(self.columns) > num_cols:
# The split here is so that we don't repr pandas row lengths.
return result.rsplit("\n\n", 1)[0] + "\n\n[{0} rows x {1} columns]".format(
len(self.index), len(self.columns)
)
else:
return result
def _repr_html_(self):
"""repr function for rendering in Jupyter Notebooks like Pandas
Dataframes.
Returns:
The HTML representation of a Dataframe.
"""
# In the future, we can have this be configurable, just like Pandas.
num_rows = 60
num_cols = 20
# We use pandas _repr_html_ to get a string of the HTML representation
# of the dataframe.
result = self._build_repr_df(num_rows, num_cols)._repr_html_()
if len(self.index) > num_rows or len(self.columns) > num_cols:
# We split so that we insert our correct dataframe dimensions.
return result.split("<p>")[
0
] + "<p>{0} rows x {1} columns</p>\n</div>".format(
len(self.index), len(self.columns)
)
else:
return result
def _get_index(self):
"""Get the index for this DataFrame.
Returns:
The union of all indexes across the partitions.
"""
return self._query_compiler.index
def _get_columns(self):
"""Get the columns for this DataFrame.
Returns:
The union of all indexes across the partitions.
"""
return self._query_compiler.columns
def _set_index(self, new_index):
"""Set the index for this DataFrame.
Args:
new_index: The new index to set this
"""
self._query_compiler.index = new_index
def _set_columns(self, new_columns):
"""Set the columns for this DataFrame.
Args:
new_index: The new index to set this
"""
self._query_compiler.columns = new_columns
index = property(_get_index, _set_index)
columns = property(_get_columns, _set_columns)
def _validate_eval_query(self, expr, **kwargs):
"""Helper function to check the arguments to eval() and query()
Args:
expr: The expression to evaluate. This string cannot contain any
Python statements, only Python expressions.
"""
if isinstance(expr, str) and expr == "":
raise ValueError("expr cannot be an empty string")
if isinstance(expr, str) and "@" in expr:
ErrorMessage.not_implemented("Local variables not yet supported in eval.")
if isinstance(expr, str) and "not" in expr:
if "parser" in kwargs and kwargs["parser"] == "python":
ErrorMessage.not_implemented("'Not' nodes are not implemented.")
@property
def size(self):
"""Get the number of elements in the DataFrame.
Returns:
The number of elements in the DataFrame.
"""
return len(self.index) * len(self.columns)
@property
def ndim(self):
"""Get the number of dimensions for this DataFrame.
Returns:
The number of dimensions for this DataFrame.
"""
# DataFrames have an invariant that requires they be 2 dimensions.
return 2
@property
def ftypes(self):
"""Get the ftypes for this DataFrame.
Returns:
The ftypes for this DataFrame.
"""
# The ftypes are common across all partitions.
# The first partition will be enough.
dtypes = self.dtypes.copy()
ftypes = ["{0}:dense".format(str(dtype)) for dtype in dtypes.values]
result = pandas.Series(ftypes, index=self.columns)
return result
@property
def dtypes(self):
"""Get the dtypes for this DataFrame.
Returns:
The dtypes for this DataFrame.
"""
return self._query_compiler.dtypes
@property
def empty(self):
"""Determines if the DataFrame is empty.
Returns:
True if the DataFrame is empty.
False otherwise.
"""
return len(self.columns) == 0 or len(self.index) == 0
@property
def values(self):
"""Create a numpy array with the values from this DataFrame.
Returns:
The numpy representation of this DataFrame.
"""
return to_pandas(self).values
@property
def axes(self):
"""Get the axes for the DataFrame.
Returns:
The axes for the DataFrame.
"""
return [self.index, self.columns]
@property
def shape(self):
"""Get the size of each of the dimensions in the DataFrame.
Returns:
A tuple with the size of each dimension as they appear in axes().
"""
return len(self.index), len(self.columns)
def _update_inplace(self, new_query_compiler):
"""Updates the current DataFrame inplace.
Args:
new_query_compiler: The new QueryCompiler to use to manage the data
"""
old_query_compiler = self._query_compiler
self._query_compiler = new_query_compiler
old_query_compiler.free()
def add_prefix(self, prefix):
"""Add a prefix to each of the column names.
Returns:
A new DataFrame containing the new column names.
"""
return DataFrame(query_compiler=self._query_compiler.add_prefix(prefix))
def add_suffix(self, suffix):
"""Add a suffix to each of the column names.
Returns:
A new DataFrame containing the new column names.
"""
return DataFrame(query_compiler=self._query_compiler.add_suffix(suffix))
def applymap(self, func):
"""Apply a function to a DataFrame elementwise.
Args:
func (callable): The function to apply.
"""
if not callable(func):
raise ValueError("'{0}' object is not callable".format(type(func)))
ErrorMessage.non_verified_udf()
return DataFrame(query_compiler=self._query_compiler.applymap(func))
def copy(self, deep=True):
"""Creates a shallow copy of the DataFrame.
Returns:
A new DataFrame pointing to the same partitions as this one.
"""
return DataFrame(query_compiler=self._query_compiler.copy())
def groupby(
self,
by=None,
axis=0,
level=None,
as_index=True,
sort=True,
group_keys=True,
squeeze=False,
**kwargs
):
"""Apply a groupby to this DataFrame. See _groupby() remote task.
Args:
by: The value to groupby.
axis: The axis to groupby.
level: The level of the groupby.
as_index: Whether or not to store result as index.
sort: Whether or not to sort the result by the index.
group_keys: Whether or not to group the keys.
squeeze: Whether or not to squeeze.
Returns:
A new DataFrame resulting from the groupby.
"""
axis = pandas.DataFrame()._get_axis_number(axis)
idx_name = ""
if callable(by):
by = by(self.index)
elif isinstance(by, string_types):
idx_name = by
by = self.__getitem__(by).values.tolist()
elif is_list_like(by):
if isinstance(by, pandas.Series):
by = by.values.tolist()
mismatch = (
len(by) != len(self) if axis == 0 else len(by) != len(self.columns)
)
if all(obj in self for obj in by) and mismatch:
# In the future, we will need to add logic to handle this, but for now
# we default to pandas in this case.
pass
elif mismatch:
raise KeyError(next(x for x in by if x not in self))
from .groupby import DataFrameGroupBy
return DataFrameGroupBy(
self,
by,
axis,
level,
as_index,
sort,
group_keys,
squeeze,
idx_name,
**kwargs
)
def sum(
self,
axis=None,
skipna=True,
level=None,
numeric_only=None,
min_count=0,
**kwargs
):
"""Perform a sum across the DataFrame.
Args:
axis (int): The axis to sum on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The sum of the DataFrame.
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
self._validate_dtypes_sum_prod_mean(axis, numeric_only, ignore_axis=False)
return self._query_compiler.sum(
axis=axis,
skipna=skipna,
level=level,
numeric_only=numeric_only,
min_count=min_count,
**kwargs
)
def abs(self):
"""Apply an absolute value function to all numeric columns.
Returns:
A new DataFrame with the applied absolute value.
"""
self._validate_dtypes(numeric_only=True)
return DataFrame(query_compiler=self._query_compiler.abs())
def isin(self, values):
"""Fill a DataFrame with booleans for cells contained in values.
Args:
values (iterable, DataFrame, Series, or dict): The values to find.
Returns:
A new DataFrame with booleans representing whether or not a cell
is in values.
True: cell is contained in values.
False: otherwise
"""
return DataFrame(query_compiler=self._query_compiler.isin(values=values))
def isna(self):
"""Fill a DataFrame with booleans for cells containing NA.
Returns:
A new DataFrame with booleans representing whether or not a cell
is NA.
True: cell contains NA.
False: otherwise.
"""
return DataFrame(query_compiler=self._query_compiler.isna())
def isnull(self):
"""Fill a DataFrame with booleans for cells containing a null value.
Returns:
A new DataFrame with booleans representing whether or not a cell
is null.
True: cell contains null.
False: otherwise.
"""
return DataFrame(query_compiler=self._query_compiler.isnull())
def keys(self):
"""Get the info axis for the DataFrame.
Returns:
A pandas Index for this DataFrame.
"""
return self.columns
def transpose(self, *args, **kwargs):
"""Transpose columns and rows for the DataFrame.
Returns:
A new DataFrame transposed from this DataFrame.
"""
return DataFrame(query_compiler=self._query_compiler.transpose(*args, **kwargs))
T = property(transpose)
def dropna(self, axis=0, how="any", thresh=None, subset=None, inplace=False):
"""Create a new DataFrame from the removed NA values from this one.
Args:
axis (int, tuple, or list): The axis to apply the drop.
how (str): How to drop the NA values.
'all': drop the label if all values are NA.
'any': drop the label if any values are NA.
thresh (int): The minimum number of NAs to require.
subset ([label]): Labels to consider from other axis.
inplace (bool): Change this DataFrame or return a new DataFrame.
True: Modify the data for this DataFrame, return None.
False: Create a new DataFrame and return it.
Returns:
If inplace is set to True, returns None, otherwise returns a new
DataFrame with the dropna applied.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if is_list_like(axis):
axis = [pandas.DataFrame()._get_axis_number(ax) for ax in axis]
result = self
for ax in axis:
result = result.dropna(axis=ax, how=how, thresh=thresh, subset=subset)
return self._create_dataframe_from_compiler(result._query_compiler, inplace)
axis = pandas.DataFrame()._get_axis_number(axis)
if how is not None and how not in ["any", "all"]:
raise ValueError("invalid how option: %s" % how)
if how is None and thresh is None:
raise TypeError("must specify how or thresh")
if subset is not None:
if axis == 1:
indices = self.index.get_indexer_for(subset)
check = indices == -1
if check.any():
raise KeyError(list(np.compress(check, subset)))
else:
indices = self.columns.get_indexer_for(subset)
check = indices == -1
if check.any():
raise KeyError(list(np.compress(check, subset)))
new_query_compiler = self._query_compiler.dropna(
axis=axis, how=how, thresh=thresh, subset=subset
)
return self._create_dataframe_from_compiler(new_query_compiler, inplace)
def add(self, other, axis="columns", level=None, fill_value=None):
"""Add this DataFrame to another or a scalar/list.
Args:
other: What to add this this DataFrame.
axis: The axis to apply addition over. Only applicable to Series
or list 'other'.
level: A level in the multilevel axis to add over.
fill_value: The value to fill NaN.
Returns:
A new DataFrame with the applied addition.
"""
axis = pandas.DataFrame()._get_axis_number(axis)
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.add,
other,
axis=axis,
level=level,
fill_value=fill_value,
)
other = self._validate_other(other, axis, numeric_or_object_only=True)
new_query_compiler = self._query_compiler.add(
other=other, axis=axis, level=level, fill_value=fill_value
)
return self._create_dataframe_from_compiler(new_query_compiler)
def agg(self, func, axis=0, *args, **kwargs):
return self.aggregate(func, axis, *args, **kwargs)
def aggregate(self, func, axis=0, *args, **kwargs):
axis = pandas.DataFrame()._get_axis_number(axis)
result = None
if axis == 0:
try:
result = self._aggregate(func, axis=axis, *args, **kwargs)
except TypeError:
pass
if result is None:
kwargs.pop("is_transform", None)
return self.apply(func, axis=axis, args=args, **kwargs)
return result
def _aggregate(self, arg, *args, **kwargs):
_axis = kwargs.pop("_axis", None)
if _axis is None:
_axis = getattr(self, "axis", 0)
kwargs.pop("_level", None)
if isinstance(arg, string_types):
return self._string_function(arg, *args, **kwargs)
# Dictionaries have complex behavior because they can be renamed here.
elif isinstance(arg, dict):
return self._default_to_pandas(pandas.DataFrame.agg, arg, *args, **kwargs)
elif is_list_like(arg) or callable(arg):
return self.apply(arg, axis=_axis, args=args, **kwargs)
else:
# TODO Make pandas error
raise ValueError("type {} is not callable".format(type(arg)))
def _string_function(self, func, *args, **kwargs):
assert isinstance(func, string_types)
f = getattr(self, func, None)
if f is not None:
if callable(f):
return f(*args, **kwargs)
assert len(args) == 0
assert (
len([kwarg for kwarg in kwargs if kwarg not in ["axis", "_level"]]) == 0
)
return f
f = getattr(np, func, None)
if f is not None:
return self._default_to_pandas(pandas.DataFrame.agg, func, *args, **kwargs)
raise ValueError("{} is an unknown string function".format(func))
def align(
self,
other,
join="outer",
axis=None,
level=None,
copy=True,
fill_value=None,
method=None,
limit=None,
fill_axis=0,
broadcast_axis=None,
):
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.align,
other,
join=join,
axis=axis,
level=level,
copy=copy,
fill_value=fill_value,
method=method,
limit=limit,
fill_axis=fill_axis,
broadcast_axis=broadcast_axis,
)
def all(self, axis=0, bool_only=None, skipna=None, level=None, **kwargs):
"""Return whether all elements are True over requested axis
Note:
If axis=None or axis=0, this call applies df.all(axis=1)
to the transpose of df.
"""
if axis is not None:
axis = pandas.DataFrame()._get_axis_number(axis)
else:
if bool_only:
raise ValueError("Axis must be 0 or 1 (got {})".format(axis))
return self._query_compiler.all(
axis=axis, bool_only=bool_only, skipna=skipna, level=level, **kwargs
)
def any(self, axis=0, bool_only=None, skipna=None, level=None, **kwargs):
"""Return whether any elements are True over requested axis
Note:
If axis=None or axis=0, this call applies on the column partitions,
otherwise operates on row partitions
"""
if axis is not None:
axis = pandas.DataFrame()._get_axis_number(axis)
else:
if bool_only:
raise ValueError("Axis must be 0 or 1 (got {})".format(axis))
return self._query_compiler.any(
axis=axis, bool_only=bool_only, skipna=skipna, level=level, **kwargs
)
def append(self, other, ignore_index=False, verify_integrity=False, sort=None):
"""Append another DataFrame/list/Series to this one.
Args:
other: The object to append to this.
ignore_index: Ignore the index on appending.
verify_integrity: Verify the integrity of the index on completion.
Returns:
A new DataFrame containing the concatenated values.
"""
if isinstance(other, (pandas.Series, dict)):
if isinstance(other, dict):
other = pandas.Series(other)
if other.name is None and not ignore_index:
raise TypeError(
"Can only append a Series if ignore_index=True"
" or if the Series has a name"
)
if other.name is None:
index = None
else:
# other must have the same index name as self, otherwise
# index name will be reset
index = pandas.Index([other.name], name=self.index.name)
# Create a Modin DataFrame from this Series for ease of development
other = DataFrame(pandas.DataFrame(other).T, index=index)._query_compiler
elif isinstance(other, list):
if not isinstance(other[0], DataFrame):
other = pandas.DataFrame(other)
if (self.columns.get_indexer(other.columns) >= 0).all():
other = DataFrame(other.loc[:, self.columns])._query_compiler
else:
other = DataFrame(other)._query_compiler
else:
other = [obj._query_compiler for obj in other]
else:
other = other._query_compiler
# If ignore_index is False, by definition the Index will be correct.
# We also do this first to ensure that we don't waste compute/memory.
if verify_integrity and not ignore_index:
appended_index = self.index.append(other.index)
is_valid = next((False for idx in appended_index.duplicated() if idx), True)
if not is_valid:
raise ValueError(
"Indexes have overlapping values: {}".format(
appended_index[appended_index.duplicated()]
)
)
query_compiler = self._query_compiler.concat(
0, other, ignore_index=ignore_index, sort=sort
)
return DataFrame(query_compiler=query_compiler)
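# Usage sketch (hypothetical frames): a named Series becomes a new row labelled with its
# name, while an unnamed Series is only accepted with ignore_index=True, mirroring pandas:
#
#     df.append(pandas.Series({"a": 1, "b": 2}, name="row3"))
#     df.append(pandas.Series({"a": 1, "b": 2}), ignore_index=True)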
def apply(
self, func, axis=0, broadcast=False, raw=False, reduce=None, args=(), **kwds
):
"""Apply a function along input axis of DataFrame.
Args:
func: The function to apply
axis: The axis over which to apply the func.
broadcast: Whether or not to broadcast.
raw: Whether or not to convert to a Series.
reduce: Whether or not to try to apply reduction procedures.
Returns:
Series or DataFrame, depending on func.
"""
axis = pandas.DataFrame()._get_axis_number(axis)
ErrorMessage.non_verified_udf()
if isinstance(func, string_types):
if axis == 1:
kwds["axis"] = axis
return getattr(self, func)(*args, **kwds)
elif isinstance(func, dict):
if axis == 1:
raise TypeError(
"(\"'dict' object is not callable\", "
"'occurred at index {0}'".format(self.index[0])
)
if len(self.columns) != len(set(self.columns)):
warnings.warn(
"duplicate column names not supported with apply().",
FutureWarning,
stacklevel=2,
)
elif is_list_like(func):
if axis == 1:
raise TypeError(
"(\"'list' object is not callable\", "
"'occurred at index {0}'".format(self.index[0])
)
elif not callable(func):
return
query_compiler = self._query_compiler.apply(func, axis, *args, **kwds)
if isinstance(query_compiler, pandas.Series):
return query_compiler
return DataFrame(query_compiler=query_compiler)
def as_blocks(self, copy=True):
return self._default_to_pandas(pandas.DataFrame.as_blocks, copy=copy)
def as_matrix(self, columns=None):
"""Convert the frame to its Numpy-array representation.
Args:
columns: If None, return all columns, otherwise,
returns specified columns.
Returns:
values: ndarray
"""
# TODO this is very inefficient, also see __array__
return to_pandas(self).as_matrix(columns)
def asfreq(self, freq, method=None, how=None, normalize=False, fill_value=None):
return self._default_to_pandas(
pandas.DataFrame.asfreq,
freq,
method=method,
how=how,
normalize=normalize,
fill_value=fill_value,
)
def asof(self, where, subset=None):
return self._default_to_pandas(pandas.DataFrame.asof, where, subset=subset)
def assign(self, **kwargs):
return self._default_to_pandas(pandas.DataFrame.assign, **kwargs)
def astype(self, dtype, copy=True, errors="raise", **kwargs):
col_dtypes = {}
if isinstance(dtype, dict):
if not set(dtype.keys()).issubset(set(self.columns)) and errors == "raise":
raise KeyError(
"Only a column name can be used for the key in"
"a dtype mappings argument."
)
col_dtypes = dtype
else:
for column in self.columns:
col_dtypes[column] = dtype
new_query_compiler = self._query_compiler.astype(col_dtypes, **kwargs)
return self._create_dataframe_from_compiler(new_query_compiler, not copy)
def at_time(self, time, asof=False):
return self._default_to_pandas(pandas.DataFrame.at_time, time, asof=asof)
def between_time(self, start_time, end_time, include_start=True, include_end=True):
return self._default_to_pandas(
pandas.DataFrame.between_time,
start_time,
end_time,
include_start=include_start,
include_end=include_end,
)
def bfill(self, axis=None, inplace=False, limit=None, downcast=None):
"""Synonym for DataFrame.fillna(method='bfill')"""
new_df = self.fillna(
method="bfill", axis=axis, limit=limit, downcast=downcast, inplace=inplace
)
if not inplace:
return new_df
def bool(self):
"""Return the bool of a single element PandasObject.
This must be a boolean scalar value, either True or False. Raise a
ValueError if the PandasObject does not have exactly 1 element, or that
element is not boolean
"""
shape = self.shape
if shape != (1,) and shape != (1, 1):
raise ValueError(
"""The PandasObject does not have exactly
1 element. Return the bool of a single
element PandasObject. The truth value is
ambiguous. Use a.empty, a.item(), a.any()
or a.all()."""
)
else:
return to_pandas(self).bool()
def boxplot(
self,
column=None,
by=None,
ax=None,
fontsize=None,
rot=0,
grid=True,
figsize=None,
layout=None,
return_type=None,
**kwargs
):
return to_pandas(self).boxplot(
column=column,
by=by,
ax=ax,
fontsize=fontsize,
rot=rot,
grid=grid,
figsize=figsize,
layout=layout,
return_type=return_type,
**kwargs
)
def clip(self, lower=None, upper=None, axis=None, inplace=False, *args, **kwargs):
# validate inputs
if axis is not None:
axis = pandas.DataFrame()._get_axis_number(axis)
self._validate_dtypes(numeric_only=True)
if is_list_like(lower) or is_list_like(upper):
if axis is None:
raise ValueError("Must specify axis = 0 or 1")
self._validate_other(lower, axis)
self._validate_other(upper, axis)
inplace = validate_bool_kwarg(inplace, "inplace")
axis = numpy_compat.function.validate_clip_with_axis(axis, args, kwargs)
# any np.nan bounds are treated as None
if lower is not None and np.any(np.isnan(lower)):
lower = None
if upper is not None and np.any(np.isnan(upper)):
upper = None
new_query_compiler = self._query_compiler.clip(
lower=lower, upper=upper, axis=axis, inplace=inplace, *args, **kwargs
)
return self._create_dataframe_from_compiler(new_query_compiler, inplace)
def clip_lower(self, threshold, axis=None, inplace=False):
return self.clip(lower=threshold, axis=axis, inplace=inplace)
def clip_upper(self, threshold, axis=None, inplace=False):
return self.clip(upper=threshold, axis=axis, inplace=inplace)
def combine(self, other, func, fill_value=None, overwrite=True):
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.combine,
other,
func,
fill_value=fill_value,
overwrite=overwrite,
)
def combine_first(self, other):
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(pandas.DataFrame.combine_first, other=other)
def compound(self, axis=None, skipna=None, level=None):
return self._default_to_pandas(
pandas.DataFrame.compound, axis=axis, skipna=skipna, level=level
)
def consolidate(self, inplace=False):
return self._default_to_pandas(pandas.DataFrame.consolidate, inplace=inplace)
def convert_objects(
self,
convert_dates=True,
convert_numeric=False,
convert_timedeltas=True,
copy=True,
):
return self._default_to_pandas(
pandas.DataFrame.convert_objects,
convert_dates=convert_dates,
convert_numeric=convert_numeric,
convert_timedeltas=convert_timedeltas,
copy=copy,
)
def corr(self, method="pearson", min_periods=1):
return self._default_to_pandas(
pandas.DataFrame.corr, method=method, min_periods=min_periods
)
def corrwith(self, other, axis=0, drop=False):
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.corrwith, other, axis=axis, drop=drop
)
def count(self, axis=0, level=None, numeric_only=False):
"""Get the count of non-null objects in the DataFrame.
Arguments:
axis: 0 or 'index' for row-wise, 1 or 'columns' for column-wise.
level: If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a DataFrame.
numeric_only: Include only float, int, boolean data
Returns:
The count, in a Series (or DataFrame if level is specified).
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
return self._query_compiler.count(
axis=axis, level=level, numeric_only=numeric_only
)
def cov(self, min_periods=None):
return self._default_to_pandas(pandas.DataFrame.cov, min_periods=min_periods)
def cummax(self, axis=None, skipna=True, *args, **kwargs):
"""Perform a cumulative maximum across the DataFrame.
Args:
axis (int): The axis to take maximum on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The cumulative maximum of the DataFrame.
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
if axis:
self._validate_dtypes()
return DataFrame(
query_compiler=self._query_compiler.cummax(
axis=axis, skipna=skipna, **kwargs
)
)
def cummin(self, axis=None, skipna=True, *args, **kwargs):
"""Perform a cumulative minimum across the DataFrame.
Args:
axis (int): The axis to cummin on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The cumulative minimum of the DataFrame.
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
if axis:
self._validate_dtypes()
return DataFrame(
query_compiler=self._query_compiler.cummin(
axis=axis, skipna=skipna, **kwargs
)
)
def cumprod(self, axis=None, skipna=True, *args, **kwargs):
"""Perform a cumulative product across the DataFrame.
Args:
axis (int): The axis to take product on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The cumulative product of the DataFrame.
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
self._validate_dtypes(numeric_only=True)
return DataFrame(
query_compiler=self._query_compiler.cumprod(
axis=axis, skipna=skipna, **kwargs
)
)
def cumsum(self, axis=None, skipna=True, *args, **kwargs):
"""Perform a cumulative sum across the DataFrame.
Args:
axis (int): The axis to take sum on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The cumulative sum of the DataFrame.
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
self._validate_dtypes(numeric_only=True)
return DataFrame(
query_compiler=self._query_compiler.cumsum(
axis=axis, skipna=skipna, **kwargs
)
)
def describe(self, percentiles=None, include=None, exclude=None):
"""
Generates descriptive statistics that summarize the central tendency,
dispersion and shape of a dataset's distribution, excluding NaN values.
Args:
percentiles (list-like of numbers, optional):
The percentiles to include in the output.
include: White-list of data types to include in results
exclude: Black-list of data types to exclude in results
Returns: Series/DataFrame of summary statistics
"""
if include is not None:
if not is_list_like(include):
include = [include]
include = [np.dtype(i) for i in include]
if exclude is not None:
if not is_list_like(exclude):
exclude = [exclude]
exclude = [np.dtype(e) for e in exclude]
if percentiles is not None:
pandas.DataFrame()._check_percentile(percentiles)
return DataFrame(
query_compiler=self._query_compiler.describe(
percentiles=percentiles, include=include, exclude=exclude
)
)
def diff(self, periods=1, axis=0):
"""Finds the difference between elements on the axis requested
Args:
periods: Periods to shift for forming difference
axis: Take difference over rows or columns
Returns:
DataFrame with the diff applied
"""
axis = pandas.DataFrame()._get_axis_number(axis)
return DataFrame(
query_compiler=self._query_compiler.diff(periods=periods, axis=axis)
)
def div(self, other, axis="columns", level=None, fill_value=None):
"""Divides this DataFrame against another DataFrame/Series/scalar.
Args:
other: The object to use to apply the divide against this.
axis: The axis to divide over.
level: The Multilevel index level to apply divide over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the Divide applied.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.div,
other,
axis=axis,
level=level,
fill_value=fill_value,
)
other = self._validate_other(other, axis, numeric_only=True)
new_query_compiler = self._query_compiler.div(
other=other, axis=axis, level=level, fill_value=fill_value
)
return self._create_dataframe_from_compiler(new_query_compiler)
def divide(self, other, axis="columns", level=None, fill_value=None):
"""Synonym for div.
Args:
other: The object to use to apply the divide against this.
axis: The axis to divide over.
level: The Multilevel index level to apply divide over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the Divide applied.
"""
return self.div(other, axis, level, fill_value)
def dot(self, other):
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(pandas.DataFrame.dot, other)
def drop(
self,
labels=None,
axis=0,
index=None,
columns=None,
level=None,
inplace=False,
errors="raise",
):
"""Return new object with labels in requested axis removed.
Args:
labels: Index or column labels to drop.
axis: Whether to drop labels from the index (0 / 'index') or
columns (1 / 'columns').
index, columns: Alternative to specifying axis (labels, axis=1 is
equivalent to columns=labels).
level: For MultiIndex
inplace: If True, do operation inplace and return None.
            errors: If 'ignore', suppress error and only existing labels are
                dropped.
Returns:
dropped : type of caller
"""
# TODO implement level
if level is not None:
return self._default_to_pandas(
pandas.DataFrame.drop,
labels=labels,
axis=axis,
index=index,
columns=columns,
level=level,
inplace=inplace,
errors=errors,
)
inplace = validate_bool_kwarg(inplace, "inplace")
if labels is not None:
if index is not None or columns is not None:
raise ValueError("Cannot specify both 'labels' and 'index'/'columns'")
axis = pandas.DataFrame()._get_axis_name(axis)
axes = {axis: labels}
elif index is not None or columns is not None:
axes, _ = pandas.DataFrame()._construct_axes_from_arguments(
(index, columns), {}
)
else:
raise ValueError(
"Need to specify at least one of 'labels', 'index' or 'columns'"
)
# TODO Clean up this error checking
if "index" not in axes:
axes["index"] = None
elif axes["index"] is not None:
if not is_list_like(axes["index"]):
axes["index"] = [axes["index"]]
if errors == "raise":
non_existant = [obj for obj in axes["index"] if obj not in self.index]
if len(non_existant):
raise ValueError(
"labels {} not contained in axis".format(non_existant)
)
else:
axes["index"] = [obj for obj in axes["index"] if obj in self.index]
# If the length is zero, we will just do nothing
if not len(axes["index"]):
axes["index"] = None
if "columns" not in axes:
axes["columns"] = None
elif axes["columns"] is not None:
if not is_list_like(axes["columns"]):
axes["columns"] = [axes["columns"]]
if errors == "raise":
non_existant = [
obj for obj in axes["columns"] if obj not in self.columns
]
if len(non_existant):
raise ValueError(
"labels {} not contained in axis".format(non_existant)
)
else:
axes["columns"] = [
obj for obj in axes["columns"] if obj in self.columns
]
# If the length is zero, we will just do nothing
if not len(axes["columns"]):
axes["columns"] = None
new_query_compiler = self._query_compiler.drop(
index=axes["index"], columns=axes["columns"]
)
return self._create_dataframe_from_compiler(new_query_compiler, inplace)
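    # Usage sketch (illustrative, hypothetical frame `df` with a column "b"):
    #   df.drop(columns=["b"])           # axis inferred from the keyword
    #   df.drop(labels=["b"], axis=1)    # equivalent: explicit labels + axis
    # With errors="ignore", labels missing from the axis are silently skipped,
    # per the filtering above.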
def drop_duplicates(self, subset=None, keep="first", inplace=False):
return self._default_to_pandas(
pandas.DataFrame.drop_duplicates, subset=subset, keep=keep, inplace=inplace
)
def duplicated(self, subset=None, keep="first"):
return self._default_to_pandas(
pandas.DataFrame.duplicated, subset=subset, keep=keep
)
def eq(self, other, axis="columns", level=None):
"""Checks element-wise that this is equal to other.
Args:
other: A DataFrame or Series or scalar to compare to.
axis: The axis to perform the eq over.
level: The Multilevel index level to apply eq over.
Returns:
A new DataFrame filled with Booleans.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.eq, other, axis=axis, level=level
)
other = self._validate_other(other, axis)
new_query_compiler = self._query_compiler.eq(
other=other, axis=axis, level=level
)
return self._create_dataframe_from_compiler(new_query_compiler)
def equals(self, other):
"""
Checks if other DataFrame is elementwise equal to the current one
Returns:
Boolean: True if equal, otherwise False
"""
if isinstance(other, pandas.DataFrame):
# Copy into a Ray DataFrame to simplify logic below
other = DataFrame(other)
if not self.index.equals(other.index) or not self.columns.equals(other.columns):
return False
return all(self.eq(other).all())
def eval(self, expr, inplace=False, **kwargs):
"""Evaluate a Python expression as a string using various backends.
Args:
expr: The expression to evaluate. This string cannot contain any
Python statements, only Python expressions.
parser: The parser to use to construct the syntax tree from the
expression. The default of 'pandas' parses code slightly
different than standard Python. Alternatively, you can parse
an expression using the 'python' parser to retain strict
Python semantics. See the enhancing performance documentation
for more details.
engine: The engine used to evaluate the expression.
truediv: Whether to use true division, like in Python >= 3
local_dict: A dictionary of local variables, taken from locals()
by default.
global_dict: A dictionary of global variables, taken from
globals() by default.
resolvers: A list of objects implementing the __getitem__ special
method that you can use to inject an additional collection
of namespaces to use for variable lookup. For example, this is
used in the query() method to inject the index and columns
variables that refer to their respective DataFrame instance
attributes.
level: The number of prior stack frames to traverse and add to
the current scope. Most users will not need to change this
parameter.
target: This is the target object for assignment. It is used when
there is variable assignment in the expression. If so, then
target must support item assignment with string keys, and if a
copy is being returned, it must also support .copy().
inplace: If target is provided, and the expression mutates target,
whether to modify target inplace. Otherwise, return a copy of
target with the mutation.
Returns:
ndarray, numeric scalar, DataFrame, Series
"""
self._validate_eval_query(expr, **kwargs)
inplace = validate_bool_kwarg(inplace, "inplace")
new_query_compiler = self._query_compiler.eval(expr, **kwargs)
if isinstance(new_query_compiler, pandas.Series):
return new_query_compiler
else:
return self._create_dataframe_from_compiler(new_query_compiler, inplace)
def ewm(
self,
com=None,
span=None,
halflife=None,
alpha=None,
min_periods=0,
freq=None,
adjust=True,
ignore_na=False,
axis=0,
):
return self._default_to_pandas(
pandas.DataFrame.ewm,
com=com,
span=span,
halflife=halflife,
alpha=alpha,
min_periods=min_periods,
freq=freq,
adjust=adjust,
ignore_na=ignore_na,
axis=axis,
)
def expanding(self, min_periods=1, freq=None, center=False, axis=0):
return self._default_to_pandas(
pandas.DataFrame.expanding,
min_periods=min_periods,
freq=freq,
center=center,
axis=axis,
)
def ffill(self, axis=None, inplace=False, limit=None, downcast=None):
"""Synonym for DataFrame.fillna(method='ffill')
"""
new_df = self.fillna(
method="ffill", axis=axis, limit=limit, downcast=downcast, inplace=inplace
)
if not inplace:
return new_df
def fillna(
self,
value=None,
method=None,
axis=None,
inplace=False,
limit=None,
downcast=None,
**kwargs
):
"""Fill NA/NaN values using the specified method.
Args:
value: Value to use to fill holes. This value cannot be a list.
            method: Method to use for filling holes in reindexed Series.
                pad / ffill: propagate last valid observation forward to next valid.
                backfill / bfill: use NEXT valid observation to fill gap.
axis: 0 or 'index', 1 or 'columns'.
inplace: If True, fill in place. Note: this will modify any other
views on this object.
limit: If method is specified, this is the maximum number of
consecutive NaN values to forward/backward fill. In other
words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method
is not specified, this is the maximum number of entries along
the entire axis where NaNs will be filled. Must be greater
than 0 if not None.
downcast: A dict of item->dtype of what to downcast if possible,
or the string 'infer' which will try to downcast to an
appropriate equal type.
Returns:
filled: DataFrame
"""
# TODO implement value passed as DataFrame
if isinstance(value, pandas.DataFrame) or isinstance(value, pandas.Series):
new_query_compiler = self._default_to_pandas(
pandas.DataFrame.fillna,
value=value,
method=method,
axis=axis,
inplace=False,
limit=limit,
downcast=downcast,
**kwargs
)._query_compiler
return self._create_dataframe_from_compiler(new_query_compiler, inplace)
inplace = validate_bool_kwarg(inplace, "inplace")
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
if isinstance(value, (list, tuple)):
raise TypeError(
'"value" parameter must be a scalar or dict, but '
'you passed a "{0}"'.format(type(value).__name__)
)
if value is None and method is None:
raise ValueError("must specify a fill method or value")
if value is not None and method is not None:
raise ValueError("cannot specify both a fill method and value")
if method is not None and method not in ["backfill", "bfill", "pad", "ffill"]:
expecting = "pad (ffill) or backfill (bfill)"
msg = "Invalid fill method. Expecting {expecting}. Got {method}".format(
expecting=expecting, method=method
)
raise ValueError(msg)
new_query_compiler = self._query_compiler.fillna(
value=value,
method=method,
axis=axis,
inplace=False,
limit=limit,
downcast=downcast,
**kwargs
)
return self._create_dataframe_from_compiler(new_query_compiler, inplace)
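    # Usage sketch (illustrative, `df` is a hypothetical frame with NaN holes):
    #   df.fillna(0)                  # replace every NaN with a scalar
    #   df.fillna(method="ffill")     # propagate the last valid value forward
    # Passing both `value` and `method`, or a list/tuple as `value`, raises per
    # the checks above.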
def filter(self, items=None, like=None, regex=None, axis=None):
"""Subset rows or columns based on their labels
Args:
items (list): list of labels to subset
like (string): retain labels where `arg in label == True`
regex (string): retain labels matching regex input
axis: axis to filter on
Returns:
A new DataFrame with the filter applied.
"""
nkw = com._count_not_none(items, like, regex)
if nkw > 1:
raise TypeError(
"Keyword arguments `items`, `like`, or `regex` "
"are mutually exclusive"
)
if nkw == 0:
raise TypeError("Must pass either `items`, `like`, or `regex`")
if axis is None:
axis = "columns" # This is the default info axis for dataframes
axis = pandas.DataFrame()._get_axis_number(axis)
labels = self.columns if axis else self.index
if items is not None:
bool_arr = labels.isin(items)
elif like is not None:
def f(x):
return like in to_str(x)
bool_arr = labels.map(f).tolist()
else:
def f(x):
return matcher.search(to_str(x)) is not None
matcher = re.compile(regex)
bool_arr = labels.map(f).tolist()
if not axis:
return self[bool_arr]
return self[self.columns[bool_arr]]
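    # Usage sketch (illustrative; column names are hypothetical). Exactly one of
    # items / like / regex may be given:
    #   df.filter(items=["sepal_length"])      # keep the listed column labels
    #   df.filter(like="sepal")                # keep labels containing "sepal"
    #   df.filter(regex="_length$", axis=1)    # keep labels matching the regex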
def first(self, offset):
return self._default_to_pandas(pandas.DataFrame.first, offset)
def first_valid_index(self):
"""Return index for first non-NA/null value.
Returns:
scalar: type of index
"""
return self._query_compiler.first_valid_index()
def floordiv(self, other, axis="columns", level=None, fill_value=None):
"""Divides this DataFrame against another DataFrame/Series/scalar.
Args:
other: The object to use to apply the divide against this.
axis: The axis to divide over.
level: The Multilevel index level to apply divide over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the Divide applied.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.floordiv,
other,
axis=axis,
level=level,
fill_value=fill_value,
)
other = self._validate_other(other, axis, numeric_only=True)
new_query_compiler = self._query_compiler.floordiv(
other=other, axis=axis, level=level, fill_value=fill_value
)
return self._create_dataframe_from_compiler(new_query_compiler)
@classmethod
def from_csv(
cls,
path,
header=0,
sep=", ",
index_col=0,
parse_dates=True,
encoding=None,
tupleize_cols=None,
infer_datetime_format=False,
):
from .io import read_csv
return read_csv(
path,
header=header,
sep=sep,
index_col=index_col,
parse_dates=parse_dates,
encoding=encoding,
tupleize_cols=tupleize_cols,
infer_datetime_format=infer_datetime_format,
)
@classmethod
def from_dict(cls, data, orient="columns", dtype=None):
ErrorMessage.default_to_pandas()
return from_pandas(pandas.DataFrame.from_dict(data, orient=orient, dtype=dtype))
@classmethod
def from_items(cls, items, columns=None, orient="columns"):
ErrorMessage.default_to_pandas()
return from_pandas(
pandas.DataFrame.from_items(items, columns=columns, orient=orient)
)
@classmethod
def from_records(
cls,
data,
index=None,
exclude=None,
columns=None,
coerce_float=False,
nrows=None,
):
ErrorMessage.default_to_pandas()
return from_pandas(
pandas.DataFrame.from_records(
data,
index=index,
exclude=exclude,
columns=columns,
coerce_float=coerce_float,
nrows=nrows,
)
)
def ge(self, other, axis="columns", level=None):
"""Checks element-wise that this is greater than or equal to other.
Args:
other: A DataFrame or Series or scalar to compare to.
axis: The axis to perform the gt over.
level: The Multilevel index level to apply gt over.
Returns:
A new DataFrame filled with Booleans.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.ge, other, axis=axis, level=level
)
other = self._validate_other(other, axis, comparison_dtypes_only=True)
new_query_compiler = self._query_compiler.ge(
other=other, axis=axis, level=level
)
return self._create_dataframe_from_compiler(new_query_compiler)
def get(self, key, default=None):
"""Get item from object for given key (DataFrame column, Panel
slice, etc.). Returns default value if not found.
Args:
key (DataFrame column, Panel slice) : the key for which value
to get
Returns:
value (type of items contained in object) : A value that is
stored at the key
"""
try:
return self[key]
except (KeyError, ValueError, IndexError):
return default
def get_dtype_counts(self):
"""Get the counts of dtypes in this object.
Returns:
The counts of dtypes in this object.
"""
result = self.dtypes.value_counts()
result.index = result.index.map(lambda x: str(x))
return result
def get_ftype_counts(self):
"""Get the counts of ftypes in this object.
Returns:
The counts of ftypes in this object.
"""
return self.ftypes.value_counts().sort_index()
def get_value(self, index, col, takeable=False):
return self._default_to_pandas(
pandas.DataFrame.get_value, index, col, takeable=takeable
)
def get_values(self):
return self._default_to_pandas(pandas.DataFrame.get_values)
def gt(self, other, axis="columns", level=None):
"""Checks element-wise that this is greater than other.
Args:
other: A DataFrame or Series or scalar to compare to.
axis: The axis to perform the gt over.
level: The Multilevel index level to apply gt over.
Returns:
A new DataFrame filled with Booleans.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.gt, other, axis=axis, level=level
)
other = self._validate_other(other, axis, comparison_dtypes_only=True)
new_query_compiler = self._query_compiler.gt(
other=other, axis=axis, level=level
)
return self._create_dataframe_from_compiler(new_query_compiler)
def head(self, n=5):
"""Get the first n rows of the DataFrame.
Args:
n (int): The number of rows to return.
Returns:
A new DataFrame with the first n rows of the DataFrame.
"""
if n >= len(self.index):
return self.copy()
return DataFrame(query_compiler=self._query_compiler.head(n))
def hist(
self,
column=None,
by=None,
grid=True,
xlabelsize=None,
xrot=None,
ylabelsize=None,
yrot=None,
ax=None,
sharex=False,
sharey=False,
figsize=None,
layout=None,
bins=10,
**kwargs
):
return self._default_to_pandas(
pandas.DataFrame.hist,
column=column,
by=by,
grid=grid,
xlabelsize=xlabelsize,
xrot=xrot,
ylabelsize=ylabelsize,
yrot=yrot,
ax=ax,
sharex=sharex,
sharey=sharey,
figsize=figsize,
layout=layout,
bins=bins,
**kwargs
)
def idxmax(self, axis=0, skipna=True):
"""Get the index of the first occurrence of the max value of the axis.
Args:
axis (int): Identify the max over the rows (1) or columns (0).
skipna (bool): Whether or not to skip NA values.
Returns:
A Series with the index for each maximum value for the axis
specified.
"""
if not all(d != np.dtype("O") for d in self.dtypes):
raise TypeError("reduction operation 'argmax' not allowed for this dtype")
return self._query_compiler.idxmax(axis=axis, skipna=skipna)
def idxmin(self, axis=0, skipna=True):
"""Get the index of the first occurrence of the min value of the axis.
Args:
axis (int): Identify the min over the rows (1) or columns (0).
skipna (bool): Whether or not to skip NA values.
Returns:
A Series with the index for each minimum value for the axis
specified.
"""
if not all(d != np.dtype("O") for d in self.dtypes):
raise TypeError("reduction operation 'argmax' not allowed for this dtype")
return self._query_compiler.idxmin(axis=axis, skipna=skipna)
def infer_objects(self):
return self._default_to_pandas(pandas.DataFrame.infer_objects)
def info(
self, verbose=None, buf=None, max_cols=None, memory_usage=None, null_counts=None
):
"""Print a concise summary of a DataFrame, which includes the index
dtype and column dtypes, non-null values and memory usage.
Args:
verbose (bool, optional): Whether to print the full summary. Defaults
to true
buf (writable buffer): Where to send output. Defaults to sys.stdout
max_cols (int, optional): When to switch from verbose to truncated
                output. By default, this is 100.
memory_usage (bool, str, optional): Specifies whether the total memory
usage of the DataFrame elements (including index) should be displayed.
                True always shows memory usage. False never shows memory usage. A value
                of 'deep' is equivalent to "True with deep introspection". Memory usage
                is shown in human-readable units (base-2 representation). Without deep
                introspection a memory estimation is made based on column dtype and
number of rows assuming values consume the same memory amount for
corresponding dtypes. With deep memory introspection, a real memory
usage calculation is performed at the cost of computational resources.
Defaults to True.
            null_counts (bool, optional): Whether to show the non-null counts. By
default, this is shown only when the frame is smaller than 100 columns
and 1690785 rows. A value of True always shows the counts and False
never shows the counts.
Returns:
Prints the summary of a DataFrame and returns None.
"""
# We will default to pandas because it will be faster than doing two passes
# over the data
buf = sys.stdout if not buf else buf
import io
with io.StringIO() as tmp_buf:
self._default_to_pandas(
pandas.DataFrame.info,
verbose=verbose,
buf=tmp_buf,
max_cols=max_cols,
memory_usage=memory_usage,
null_counts=null_counts,
)
result = tmp_buf.getvalue()
result = result.replace(
"pandas.core.frame.DataFrame", "modin.pandas.dataframe.DataFrame"
)
buf.write(result)
return None
index = self.index
columns = self.columns
dtypes = self.dtypes
# Set up default values
verbose = True if verbose is None else verbose
buf = sys.stdout if not buf else buf
max_cols = 100 if not max_cols else max_cols
memory_usage = True if memory_usage is None else memory_usage
if not null_counts:
if len(columns) < 100 and len(index) < 1690785:
null_counts = True
else:
null_counts = False
# Determine if actually verbose
actually_verbose = True if verbose and max_cols > len(columns) else False
if type(memory_usage) == str and memory_usage == "deep":
memory_usage_deep = True
else:
memory_usage_deep = False
# Start putting together output
# Class denoted in info() output
class_string = "<class 'modin.pandas.dataframe.DataFrame'>\n"
# Create the Index info() string by parsing self.index
index_string = index.summary() + "\n"
if null_counts:
counts = self._query_compiler.count()
if memory_usage:
memory_usage_data = self._query_compiler.memory_usage(
deep=memory_usage_deep, index=True
)
if actually_verbose:
# Create string for verbose output
col_string = "Data columns (total {0} columns):\n".format(len(columns))
for col, dtype in zip(columns, dtypes):
col_string += "{0}\t".format(col)
if null_counts:
col_string += "{0} not-null ".format(counts[col])
col_string += "{0}\n".format(dtype)
else:
# Create string for not verbose output
col_string = "Columns: {0} entries, {1} to {2}\n".format(
len(columns), columns[0], columns[-1]
)
# A summary of the dtypes in the dataframe
dtypes_string = "dtypes: "
for dtype, count in dtypes.value_counts().iteritems():
dtypes_string += "{0}({1}),".format(dtype, count)
dtypes_string = dtypes_string[:-1] + "\n"
# Create memory usage string
memory_string = ""
if memory_usage:
if memory_usage_deep:
memory_string = "memory usage: {0} bytes".format(memory_usage_data)
else:
memory_string = "memory usage: {0}+ bytes".format(memory_usage_data)
# Combine all the components of the info() output
result = "".join(
[class_string, index_string, col_string, dtypes_string, memory_string]
)
# Write to specified output buffer
buf.write(result)
def insert(self, loc, column, value, allow_duplicates=False):
"""Insert column into DataFrame at specified location.
Args:
loc (int): Insertion index. Must verify 0 <= loc <= len(columns).
column (hashable object): Label of the inserted column.
value (int, Series, or array-like): The values to insert.
allow_duplicates (bool): Whether to allow duplicate column names.
"""
if isinstance(value, (DataFrame, pandas.DataFrame)):
if len(value.columns) != 1:
raise ValueError("Wrong number of items passed 2, placement implies 1")
value = value.iloc[:, 0]
if len(self.index) == 0:
try:
value = pandas.Series(value)
except (TypeError, ValueError, IndexError):
raise ValueError(
"Cannot insert into a DataFrame with no defined index "
"and a value that cannot be converted to a "
"Series"
)
new_index = value.index.copy()
new_columns = self.columns.insert(loc, column)
new_query_compiler = DataFrame(
value, index=new_index, columns=new_columns
)._query_compiler
else:
if not is_list_like(value):
value = np.full(len(self.index), value)
if not isinstance(value, pandas.Series) and len(value) != len(self.index):
raise ValueError("Length of values does not match length of index")
if not allow_duplicates and column in self.columns:
raise ValueError("cannot insert {0}, already exists".format(column))
if loc > len(self.columns):
raise IndexError(
"index {0} is out of bounds for axis 0 with size {1}".format(
loc, len(self.columns)
)
)
if loc < 0:
raise ValueError("unbounded slice")
new_query_compiler = self._query_compiler.insert(loc, column, value)
self._update_inplace(new_query_compiler=new_query_compiler)
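    # Usage sketch (illustrative): for a hypothetical 3-row frame `df`,
    #   df.insert(0, "flag", [True, False, True])
    # adds a "flag" column at position 0 in place; inserting an existing label
    # without allow_duplicates=True raises, as checked above.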
def interpolate(
self,
method="linear",
axis=0,
limit=None,
inplace=False,
limit_direction="forward",
downcast=None,
**kwargs
):
return self._default_to_pandas(
pandas.DataFrame.interpolate,
method=method,
axis=axis,
limit=limit,
inplace=inplace,
limit_direction=limit_direction,
downcast=downcast,
**kwargs
)
def iterrows(self):
"""Iterate over DataFrame rows as (index, Series) pairs.
Note:
Generators can't be pickled so from the remote function
we expand the generator into a list before getting it.
This is not that ideal.
Returns:
A generator that iterates over the rows of the frame.
"""
index_iter = iter(self.index)
def iterrow_builder(df):
df.columns = self.columns
df.index = [next(index_iter)]
return df.iterrows()
partition_iterator = PartitionIterator(self._query_compiler, 0, iterrow_builder)
for v in partition_iterator:
yield v
def items(self):
"""Iterator over (column name, Series) pairs.
Note:
Generators can't be pickled so from the remote function
we expand the generator into a list before getting it.
This is not that ideal.
Returns:
A generator that iterates over the columns of the frame.
"""
col_iter = iter(self.columns)
def items_builder(df):
df.columns = [next(col_iter)]
df.index = self.index
return df.items()
partition_iterator = PartitionIterator(self._query_compiler, 1, items_builder)
for v in partition_iterator:
yield v
def iteritems(self):
"""Iterator over (column name, Series) pairs.
Note:
Returns the same thing as .items()
Returns:
A generator that iterates over the columns of the frame.
"""
return self.items()
def itertuples(self, index=True, name="Pandas"):
"""Iterate over DataFrame rows as namedtuples.
Args:
index (boolean, default True): If True, return the index as the
first element of the tuple.
name (string, default "Pandas"): The name of the returned
namedtuples or None to return regular tuples.
Note:
Generators can't be pickled so from the remote function
we expand the generator into a list before getting it.
This is not that ideal.
Returns:
A tuple representing row data. See args for varying tuples.
"""
index_iter = iter(self.index)
def itertuples_builder(df):
df.columns = self.columns
df.index = [next(index_iter)]
return df.itertuples(index=index, name=name)
partition_iterator = PartitionIterator(
self._query_compiler, 0, itertuples_builder
)
for v in partition_iterator:
yield v
def join(self, other, on=None, how="left", lsuffix="", rsuffix="", sort=False):
"""Join two or more DataFrames, or a DataFrame with a collection.
Args:
other: What to join this DataFrame with.
on: A column name to use from the left for the join.
how: What type of join to conduct.
lsuffix: The suffix to add to column names that match on left.
rsuffix: The suffix to add to column names that match on right.
sort: Whether or not to sort.
Returns:
The joined DataFrame.
"""
if on is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.join,
other,
on=on,
how=how,
lsuffix=lsuffix,
rsuffix=rsuffix,
sort=sort,
)
if isinstance(other, pandas.Series):
if other.name is None:
raise ValueError("Other Series must have a name")
other = DataFrame({other.name: other})
if isinstance(other, DataFrame):
# Joining the empty DataFrames with either index or columns is
# fast. It gives us proper error checking for the edge cases that
# would otherwise require a lot more logic.
pandas.DataFrame(columns=self.columns).join(
pandas.DataFrame(columns=other.columns),
lsuffix=lsuffix,
rsuffix=rsuffix,
).columns
return DataFrame(
query_compiler=self._query_compiler.join(
other._query_compiler,
how=how,
lsuffix=lsuffix,
rsuffix=rsuffix,
sort=sort,
)
)
else:
# This constraint carried over from Pandas.
if on is not None:
raise ValueError(
"Joining multiple DataFrames only supported for joining on index"
)
# See note above about error checking with an empty join.
pandas.DataFrame(columns=self.columns).join(
[pandas.DataFrame(columns=obj.columns) for obj in other],
lsuffix=lsuffix,
rsuffix=rsuffix,
).columns
return DataFrame(
query_compiler=self._query_compiler.join(
[obj._query_compiler for obj in other],
how=how,
lsuffix=lsuffix,
rsuffix=rsuffix,
sort=sort,
)
)
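    # Usage sketch (illustrative, hypothetical frames `left` and `right`):
    #   left.join(right, lsuffix="_l", rsuffix="_r")   # index-aligned left join
    # Passing `on=` falls back to pandas (see the branch at the top of join),
    # and joining a list of frames is only supported on the index.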
def kurt(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
return self._default_to_pandas(
pandas.DataFrame.kurt,
axis=axis,
skipna=skipna,
level=level,
numeric_only=numeric_only,
**kwargs
)
def kurtosis(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
return self._default_to_pandas(
pandas.DataFrame.kurtosis,
axis=axis,
skipna=skipna,
level=level,
numeric_only=numeric_only,
**kwargs
)
def last(self, offset):
return self._default_to_pandas(pandas.DataFrame.last, offset)
def last_valid_index(self):
"""Return index for last non-NA/null value.
Returns:
scalar: type of index
"""
return self._query_compiler.last_valid_index()
def le(self, other, axis="columns", level=None):
"""Checks element-wise that this is less than or equal to other.
Args:
other: A DataFrame or Series or scalar to compare to.
axis: The axis to perform the le over.
level: The Multilevel index level to apply le over.
Returns:
A new DataFrame filled with Booleans.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.le, other, axis=axis, level=level
)
other = self._validate_other(other, axis, comparison_dtypes_only=True)
new_query_compiler = self._query_compiler.le(
other=other, axis=axis, level=level
)
return self._create_dataframe_from_compiler(new_query_compiler)
def lookup(self, row_labels, col_labels):
return self._default_to_pandas(pandas.DataFrame.lookup, row_labels, col_labels)
def lt(self, other, axis="columns", level=None):
"""Checks element-wise that this is less than other.
Args:
other: A DataFrame or Series or scalar to compare to.
axis: The axis to perform the lt over.
level: The Multilevel index level to apply lt over.
Returns:
A new DataFrame filled with Booleans.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.lt, other, axis=axis, level=level
)
other = self._validate_other(other, axis, comparison_dtypes_only=True)
new_query_compiler = self._query_compiler.lt(
other=other, axis=axis, level=level
)
return self._create_dataframe_from_compiler(new_query_compiler)
def mad(self, axis=None, skipna=None, level=None):
return self._default_to_pandas(
pandas.DataFrame.mad, axis=axis, skipna=skipna, level=level
)
def mask(
self,
cond,
other=np.nan,
inplace=False,
axis=None,
level=None,
errors="raise",
try_cast=False,
raise_on_error=None,
):
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.mask,
cond,
other=other,
inplace=inplace,
axis=axis,
level=level,
errors=errors,
try_cast=try_cast,
raise_on_error=raise_on_error,
)
def max(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
"""Perform max across the DataFrame.
Args:
axis (int): The axis to take the max on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The max of the DataFrame.
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
self._validate_dtypes_min_max(axis, numeric_only)
return self._query_compiler.max(
axis=axis, skipna=skipna, level=level, numeric_only=numeric_only, **kwargs
)
def mean(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
"""Computes mean across the DataFrame.
Args:
axis (int): The axis to take the mean on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The mean of the DataFrame. (Pandas series)
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
self._validate_dtypes_sum_prod_mean(axis, numeric_only, ignore_axis=False)
return self._query_compiler.mean(
axis=axis, skipna=skipna, level=level, numeric_only=numeric_only, **kwargs
)
def median(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
"""Computes median across the DataFrame.
Args:
axis (int): The axis to take the median on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The median of the DataFrame. (Pandas series)
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
if numeric_only is not None and not numeric_only:
self._validate_dtypes(numeric_only=True)
return self._query_compiler.median(
axis=axis, skipna=skipna, level=level, numeric_only=numeric_only, **kwargs
)
def melt(
self,
id_vars=None,
value_vars=None,
var_name=None,
value_name="value",
col_level=None,
):
return self._default_to_pandas(
pandas.DataFrame.melt,
id_vars=id_vars,
value_vars=value_vars,
var_name=var_name,
value_name=value_name,
col_level=col_level,
)
def memory_usage(self, index=True, deep=False):
"""Returns the memory usage of each column in bytes
Args:
index (bool): Whether to include the memory usage of the DataFrame's
index in returned Series. Defaults to True
deep (bool): If True, introspect the data deeply by interrogating
objects dtypes for system-level memory consumption. Defaults to False
Returns:
A Series where the index are the column names and the values are
            the memory usage of each of the columns in bytes. If `index=True`,
then the first value of the Series will be 'Index' with its memory usage.
"""
result = self._query_compiler.memory_usage(index=index, deep=deep)
result.index = self.columns
if index:
index_value = self.index.memory_usage(deep=deep)
return pandas.Series(index_value, index=["Index"]).append(result)
return result
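    # Usage sketch (illustrative): `df.memory_usage(deep=True)` returns a Series
    # of per-column byte counts with an extra leading "Index" entry; pass
    # index=False to omit that entry.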
def merge(
self,
right,
how="inner",
on=None,
left_on=None,
right_on=None,
left_index=False,
right_index=False,
sort=False,
suffixes=("_x", "_y"),
copy=True,
indicator=False,
validate=None,
):
"""Database style join, where common columns in "on" are merged.
Args:
right: The DataFrame to merge against.
how: What type of join to use.
on: The common column name(s) to join on. If None, and left_on and
right_on are also None, will default to all commonly named
columns.
left_on: The column(s) on the left to use for the join.
right_on: The column(s) on the right to use for the join.
left_index: Use the index from the left as the join keys.
right_index: Use the index from the right as the join keys.
sort: Sort the join keys lexicographically in the result.
suffixes: Add this suffix to the common names not in the "on".
copy: Does nothing in our implementation
indicator: Adds a column named _merge to the DataFrame with
metadata from the merge about each row.
validate: Checks if merge is a specific type.
Returns:
A merged Dataframe
"""
if not isinstance(right, DataFrame):
raise ValueError(
"can not merge DataFrame with instance of type "
"{}".format(type(right))
)
if left_index is False or right_index is False:
if isinstance(right, DataFrame):
right = right._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.merge,
right,
how=how,
on=on,
left_on=left_on,
right_on=right_on,
left_index=left_index,
right_index=right_index,
sort=sort,
suffixes=suffixes,
copy=copy,
indicator=indicator,
validate=validate,
)
if left_index and right_index:
return self.join(
right, how=how, lsuffix=suffixes[0], rsuffix=suffixes[1], sort=sort
)
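    # Usage sketch (illustrative): only the index-on-index case stays on the
    # query compiler; every other argument combination defaults to pandas above.
    #   a.merge(b, left_index=True, right_index=True, suffixes=("_a", "_b"))
    # is routed through self.join(b, how="inner", lsuffix="_a", rsuffix="_b").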
def min(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
"""Perform min across the DataFrame.
Args:
axis (int): The axis to take the min on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The min of the DataFrame.
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
self._validate_dtypes_min_max(axis, numeric_only)
return self._query_compiler.min(
axis=axis, skipna=skipna, level=level, numeric_only=numeric_only, **kwargs
)
def mod(self, other, axis="columns", level=None, fill_value=None):
"""Mods this DataFrame against another DataFrame/Series/scalar.
Args:
other: The object to use to apply the mod against this.
axis: The axis to mod over.
level: The Multilevel index level to apply mod over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the Mod applied.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.mod,
other,
axis=axis,
level=level,
fill_value=fill_value,
)
other = self._validate_other(other, axis, numeric_only=True)
new_query_compiler = self._query_compiler.mod(
other=other, axis=axis, level=level, fill_value=fill_value
)
return self._create_dataframe_from_compiler(new_query_compiler)
def mode(self, axis=0, numeric_only=False):
"""Perform mode across the DataFrame.
Args:
axis (int): The axis to take the mode on.
numeric_only (bool): if True, only apply to numeric columns.
Returns:
DataFrame: The mode of the DataFrame.
"""
axis = pandas.DataFrame()._get_axis_number(axis)
return DataFrame(
query_compiler=self._query_compiler.mode(
axis=axis, numeric_only=numeric_only
)
)
def mul(self, other, axis="columns", level=None, fill_value=None):
"""Multiplies this DataFrame against another DataFrame/Series/scalar.
Args:
other: The object to use to apply the multiply against this.
axis: The axis to multiply over.
level: The Multilevel index level to apply multiply over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the Multiply applied.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.mul,
other,
axis=axis,
level=level,
fill_value=fill_value,
)
other = self._validate_other(other, axis, numeric_only=True)
new_query_compiler = self._query_compiler.mul(
other=other, axis=axis, level=level, fill_value=fill_value
)
return self._create_dataframe_from_compiler(new_query_compiler)
def multiply(self, other, axis="columns", level=None, fill_value=None):
"""Synonym for mul.
Args:
other: The object to use to apply the multiply against this.
axis: The axis to multiply over.
level: The Multilevel index level to apply multiply over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the Multiply applied.
"""
return self.mul(other, axis, level, fill_value)
def ne(self, other, axis="columns", level=None):
"""Checks element-wise that this is not equal to other.
Args:
other: A DataFrame or Series or scalar to compare to.
axis: The axis to perform the ne over.
level: The Multilevel index level to apply ne over.
Returns:
A new DataFrame filled with Booleans.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.ne, other, axis=axis, level=level
)
other = self._validate_other(other, axis)
new_query_compiler = self._query_compiler.ne(
other=other, axis=axis, level=level
)
return self._create_dataframe_from_compiler(new_query_compiler)
def nlargest(self, n, columns, keep="first"):
return self._default_to_pandas(pandas.DataFrame.nlargest, n, columns, keep=keep)
def notna(self):
"""Perform notna across the DataFrame.
Returns:
Boolean DataFrame where value is False if corresponding
value is NaN, True otherwise
"""
return DataFrame(query_compiler=self._query_compiler.notna())
def notnull(self):
"""Perform notnull across the DataFrame.
Returns:
Boolean DataFrame where value is False if corresponding
value is NaN, True otherwise
"""
return DataFrame(query_compiler=self._query_compiler.notnull())
def nsmallest(self, n, columns, keep="first"):
return self._default_to_pandas(
pandas.DataFrame.nsmallest, n, columns, keep=keep
)
def nunique(self, axis=0, dropna=True):
"""Return Series with number of distinct
observations over requested axis.
Args:
axis : {0 or 'index', 1 or 'columns'}, default 0
dropna : boolean, default True
Returns:
nunique : Series
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
return self._query_compiler.nunique(axis=axis, dropna=dropna)
def pct_change(self, periods=1, fill_method="pad", limit=None, freq=None, **kwargs):
return self._default_to_pandas(
pandas.DataFrame.pct_change,
periods=periods,
fill_method=fill_method,
limit=limit,
freq=freq,
**kwargs
)
def pipe(self, func, *args, **kwargs):
"""Apply func(self, *args, **kwargs)
Args:
func: function to apply to the df.
args: positional arguments passed into ``func``.
kwargs: a dictionary of keyword arguments passed into ``func``.
Returns:
object: the return type of ``func``.
"""
return com._pipe(self, func, *args, **kwargs)
def pivot(self, index=None, columns=None, values=None):
return self._default_to_pandas(
pandas.DataFrame.pivot, index=index, columns=columns, values=values
)
def pivot_table(
self,
values=None,
index=None,
columns=None,
aggfunc="mean",
fill_value=None,
margins=False,
dropna=True,
margins_name="All",
):
return self._default_to_pandas(
pandas.DataFrame.pivot_table,
values=values,
index=index,
columns=columns,
aggfunc=aggfunc,
fill_value=fill_value,
margins=margins,
dropna=dropna,
margins_name=margins_name,
)
@property
def plot(
self,
x=None,
y=None,
kind="line",
ax=None,
subplots=False,
sharex=None,
sharey=False,
layout=None,
figsize=None,
use_index=True,
title=None,
grid=None,
legend=True,
style=None,
logx=False,
logy=False,
loglog=False,
xticks=None,
yticks=None,
xlim=None,
ylim=None,
rot=None,
fontsize=None,
colormap=None,
table=False,
yerr=None,
xerr=None,
secondary_y=False,
sort_columns=False,
**kwargs
):
return to_pandas(self).plot
def pop(self, item):
"""Pops an item from this DataFrame and returns it.
Args:
item (str): Column label to be popped
Returns:
A Series containing the popped values. Also modifies this
DataFrame.
"""
result = self[item]
del self[item]
return result
def pow(self, other, axis="columns", level=None, fill_value=None):
"""Pow this DataFrame against another DataFrame/Series/scalar.
Args:
other: The object to use to apply the pow against this.
axis: The axis to pow over.
level: The Multilevel index level to apply pow over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the Pow applied.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.pow,
other,
axis=axis,
level=level,
fill_value=fill_value,
)
other = self._validate_other(other, axis, numeric_only=True)
new_query_compiler = self._query_compiler.pow(
other=other, axis=axis, level=level, fill_value=fill_value
)
return self._create_dataframe_from_compiler(new_query_compiler)
def prod(
self,
axis=None,
skipna=None,
level=None,
numeric_only=None,
min_count=1,
**kwargs
):
"""Return the product of the values for the requested axis
Args:
axis : {index (0), columns (1)}
skipna : boolean, default True
level : int or level name, default None
numeric_only : boolean, default None
min_count : int, default 1
Returns:
prod : Series or DataFrame (if level specified)
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
self._validate_dtypes_sum_prod_mean(axis, numeric_only, ignore_axis=True)
return self._query_compiler.prod(
axis=axis,
skipna=skipna,
level=level,
numeric_only=numeric_only,
min_count=min_count,
**kwargs
)
def product(
self,
axis=None,
skipna=None,
level=None,
numeric_only=None,
min_count=1,
**kwargs
):
"""Return the product of the values for the requested axis
Args:
axis : {index (0), columns (1)}
skipna : boolean, default True
level : int or level name, default None
numeric_only : boolean, default None
min_count : int, default 1
Returns:
product : Series or DataFrame (if level specified)
"""
return self.prod(
axis=axis,
skipna=skipna,
level=level,
numeric_only=numeric_only,
min_count=min_count,
**kwargs
)
def quantile(self, q=0.5, axis=0, numeric_only=True, interpolation="linear"):
"""Return values at the given quantile over requested axis,
a la numpy.percentile.
Args:
q (float): 0 <= q <= 1, the quantile(s) to compute
axis (int): 0 or 'index' for row-wise,
1 or 'columns' for column-wise
interpolation: {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
Specifies which interpolation method to use
Returns:
quantiles : Series or DataFrame
If q is an array, a DataFrame will be returned where the
index is q, the columns are the columns of self, and the
values are the quantiles.
If q is a float, a Series will be returned where the
index is the columns of self and the values
are the quantiles.
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
def check_dtype(t):
return is_numeric_dtype(t) or is_datetime_or_timedelta_dtype(t)
if not numeric_only:
# If not numeric_only and columns, then check all columns are either
# numeric, timestamp, or timedelta
if not axis and not all(check_dtype(t) for t in self.dtypes):
raise TypeError("can't multiply sequence by non-int of type 'float'")
# If over rows, then make sure that all dtypes are equal for not
# numeric_only
elif axis:
for i in range(1, len(self.dtypes)):
pre_dtype = self.dtypes[i - 1]
curr_dtype = self.dtypes[i]
if not is_dtype_equal(pre_dtype, curr_dtype):
raise TypeError(
"Cannot compare type '{0}' with type '{1}'".format(
pre_dtype, curr_dtype
)
)
else:
# Normally pandas returns this near the end of the quantile, but we
# can't afford the overhead of running the entire operation before
# we error.
if not any(is_numeric_dtype(t) for t in self.dtypes):
raise ValueError("need at least one array to concatenate")
# check that all qs are between 0 and 1
pandas.DataFrame()._check_percentile(q)
axis = pandas.DataFrame()._get_axis_number(axis)
if isinstance(q, (pandas.Series, np.ndarray, pandas.Index, list)):
return DataFrame(
query_compiler=self._query_compiler.quantile_for_list_of_values(
q=q,
axis=axis,
numeric_only=numeric_only,
interpolation=interpolation,
)
)
else:
return self._query_compiler.quantile_for_single_value(
q=q, axis=axis, numeric_only=numeric_only, interpolation=interpolation
)
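    # Usage sketch (illustrative, numeric frame `df`):
    #   df.quantile(0.5)            # Series: one median per column
    #   df.quantile([0.25, 0.75])   # DataFrame indexed by the requested quantiles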
def query(self, expr, inplace=False, **kwargs):
"""Queries the Dataframe with a boolean expression
Returns:
A new DataFrame if inplace=False
"""
ErrorMessage.non_verified_udf()
self._validate_eval_query(expr, **kwargs)
inplace = validate_bool_kwarg(inplace, "inplace")
new_query_compiler = self._query_compiler.query(expr, **kwargs)
return self._create_dataframe_from_compiler(new_query_compiler, inplace)
def radd(self, other, axis="columns", level=None, fill_value=None):
return self.add(other, axis, level, fill_value)
def rank(
self,
axis=0,
method="average",
numeric_only=None,
na_option="keep",
ascending=True,
pct=False,
):
"""
Compute numerical data ranks (1 through n) along axis.
Equal values are assigned a rank that is the [method] of
the ranks of those values.
Args:
axis (int): 0 or 'index' for row-wise,
1 or 'columns' for column-wise
method: {'average', 'min', 'max', 'first', 'dense'}
Specifies which method to use for equal vals
numeric_only (boolean)
Include only float, int, boolean data.
            na_option: {'keep', 'top', 'bottom'}
                Specifies how to handle NA values
            ascending (boolean):
                Decides ranking order
pct (boolean):
Computes percentage ranking of data
Returns:
A new DataFrame
"""
axis = pandas.DataFrame()._get_axis_number(axis)
return DataFrame(
query_compiler=self._query_compiler.rank(
axis=axis,
method=method,
numeric_only=numeric_only,
na_option=na_option,
ascending=ascending,
pct=pct,
)
)
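    # Usage sketch (illustrative): rank columns from largest to smallest, giving
    # tied values their lowest shared rank --
    #   df.rank(method="min", ascending=False)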
def rdiv(self, other, axis="columns", level=None, fill_value=None):
"""Div this DataFrame against another DataFrame/Series/scalar.
Args:
other: The object to use to apply the div against this.
axis: The axis to div over.
level: The Multilevel index level to apply div over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the rdiv applied.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.rdiv,
other,
axis=axis,
level=level,
fill_value=fill_value,
)
other = self._validate_other(other, axis, numeric_only=True)
new_query_compiler = self._query_compiler.rdiv(
other=other, axis=axis, level=level, fill_value=fill_value
)
return self._create_dataframe_from_compiler(new_query_compiler)
def reindex(
self,
labels=None,
index=None,
columns=None,
axis=None,
method=None,
copy=True,
level=None,
fill_value=np.nan,
limit=None,
tolerance=None,
):
if level is not None:
return self._default_to_pandas(
pandas.DataFrame.reindex,
labels=labels,
index=index,
columns=columns,
axis=axis,
method=method,
copy=copy,
level=level,
fill_value=fill_value,
limit=limit,
tolerance=tolerance,
)
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
if axis == 0 and labels is not None:
index = labels
elif labels is not None:
columns = labels
if index is not None:
new_query_compiler = self._query_compiler.reindex(
0,
index,
method=method,
fill_value=fill_value,
limit=limit,
tolerance=tolerance,
)
else:
new_query_compiler = self._query_compiler
if columns is not None:
final_query_compiler = new_query_compiler.reindex(
1,
columns,
method=method,
fill_value=fill_value,
limit=limit,
tolerance=tolerance,
)
else:
final_query_compiler = new_query_compiler
return self._create_dataframe_from_compiler(final_query_compiler, not copy)
def reindex_axis(
self,
labels,
axis=0,
method=None,
level=None,
copy=True,
limit=None,
fill_value=np.nan,
):
return self._default_to_pandas(
pandas.DataFrame.reindex_axis,
labels,
axis=axis,
method=method,
level=level,
copy=copy,
limit=limit,
fill_value=fill_value,
)
def reindex_like(self, other, method=None, copy=True, limit=None, tolerance=None):
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.reindex_like,
other,
method=method,
copy=copy,
limit=limit,
tolerance=tolerance,
)
def rename(
self,
mapper=None,
index=None,
columns=None,
axis=None,
copy=True,
inplace=False,
level=None,
):
"""Alters axes labels.
Args:
mapper, index, columns: Transformations to apply to the axis's
values.
axis: Axis to target with mapper.
copy: Also copy underlying data.
inplace: Whether to return a new DataFrame.
level: Only rename a specific level of a MultiIndex.
Returns:
If inplace is False, a new DataFrame with the updated axes.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
# We have to do this with the args because of how rename handles
# kwargs. It doesn't ignore None values passed in, so we have to filter
# them ourselves.
args = locals()
kwargs = {k: v for k, v in args.items() if v is not None and k != "self"}
# inplace should always be true because this is just a copy, and we
# will use the results after.
kwargs["inplace"] = True
df_to_rename = pandas.DataFrame(index=self.index, columns=self.columns)
df_to_rename.rename(**kwargs)
if inplace:
obj = self
else:
obj = self.copy()
obj.index = df_to_rename.index
obj.columns = df_to_rename.columns
if not inplace:
return obj
def rename_axis(self, mapper, axis=0, copy=True, inplace=False):
axes_is_columns = axis == 1 or axis == "columns"
renamed = self if inplace else self.copy()
if axes_is_columns:
renamed.columns.name = mapper
else:
renamed.index.name = mapper
if not inplace:
return renamed
def _set_axis_name(self, name, axis=0, inplace=False):
"""Alter the name or names of the axis.
Args:
name: Name for the Index, or list of names for the MultiIndex
axis: 0 or 'index' for the index; 1 or 'columns' for the columns
inplace: Whether to modify `self` directly or return a copy
Returns:
Type of caller or None if inplace=True.
"""
axes_is_columns = axis == 1 or axis == "columns"
renamed = self if inplace else self.copy()
if axes_is_columns:
renamed.columns.set_names(name)
else:
renamed.index.set_names(name)
if not inplace:
return renamed
def reorder_levels(self, order, axis=0):
return self._default_to_pandas(
pandas.DataFrame.reorder_levels, order, axis=axis
)
def replace(
self,
to_replace=None,
value=None,
inplace=False,
limit=None,
regex=False,
method="pad",
):
return self._default_to_pandas(
pandas.DataFrame.replace,
to_replace=to_replace,
value=value,
inplace=inplace,
limit=limit,
regex=regex,
method=method,
)
def resample(
self,
rule,
how=None,
axis=0,
fill_method=None,
closed=None,
label=None,
convention="start",
kind=None,
loffset=None,
limit=None,
base=0,
on=None,
level=None,
):
return self._default_to_pandas(
pandas.DataFrame.resample,
rule,
how=how,
axis=axis,
fill_method=fill_method,
closed=closed,
label=label,
convention=convention,
kind=kind,
loffset=loffset,
limit=limit,
base=base,
on=on,
level=level,
)
def reset_index(
self, level=None, drop=False, inplace=False, col_level=0, col_fill=""
):
"""Reset this index to default and create column from current index.
Args:
level: Only remove the given levels from the index. Removes all
levels by default
drop: Do not try to insert index into DataFrame columns. This
resets the index to the default integer index.
inplace: Modify the DataFrame in place (do not create a new object)
col_level : If the columns have multiple levels, determines which
level the labels are inserted into. By default it is inserted
into the first level.
col_fill: If the columns have multiple levels, determines how the
other levels are named. If None then the index name is
repeated.
Returns:
A new DataFrame if inplace is False, None otherwise.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
# TODO Implement level
if level is not None:
new_query_compiler = self._default_to_pandas(
pandas.DataFrame.reset_index,
level=level,
drop=drop,
inplace=inplace,
col_level=col_level,
col_fill=col_fill,
)
return self._create_dataframe_from_compiler(new_query_compiler, inplace)
# Error checking for matching Pandas. Pandas does not allow you to
# insert a dropped index into a DataFrame if these columns already
# exist.
if (
not drop
and not isinstance(self.index, pandas.MultiIndex)
and all(n in self.columns for n in ["level_0", "index"])
):
raise ValueError("cannot insert level_0, already exists")
new_query_compiler = self._query_compiler.reset_index(drop=drop, level=level)
return self._create_dataframe_from_compiler(new_query_compiler, inplace)
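    # Usage sketch (illustrative): `df.reset_index(drop=True)` discards the old
    # index and renumbers rows 0..n-1; with drop=False the old index is inserted
    # back as a column, subject to the duplicate-name check above.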
def rfloordiv(self, other, axis="columns", level=None, fill_value=None):
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.rfloordiv,
other,
axis=axis,
level=level,
fill_value=fill_value,
)
other = self._validate_other(other, axis, numeric_only=True)
new_query_compiler = self._query_compiler.rfloordiv(
other=other, axis=axis, level=level, fill_value=fill_value
)
return self._create_dataframe_from_compiler(new_query_compiler)
def rmod(self, other, axis="columns", level=None, fill_value=None):
"""Mod this DataFrame against another DataFrame/Series/scalar.
Args:
            other: The object to use to apply the mod against this.
            axis: The axis to mod over.
            level: The Multilevel index level to apply mod over.
            fill_value: The value to fill NaNs with.
        Returns:
            A new DataFrame with the rmod applied.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.rmod,
other,
axis=axis,
level=level,
fill_value=fill_value,
)
other = self._validate_other(other, axis, numeric_only=True)
new_query_compiler = self._query_compiler.rmod(
other=other, axis=axis, level=level, fill_value=fill_value
)
return self._create_dataframe_from_compiler(new_query_compiler)
def rmul(self, other, axis="columns", level=None, fill_value=None):
return self.mul(other, axis, level, fill_value)
def rolling(
self,
window,
min_periods=None,
freq=None,
center=False,
win_type=None,
on=None,
axis=0,
closed=None,
):
return self._default_to_pandas(
pandas.DataFrame.rolling,
window,
min_periods=min_periods,
freq=freq,
center=center,
win_type=win_type,
on=on,
axis=axis,
closed=closed,
)
def round(self, decimals=0, *args, **kwargs):
"""Round each element in the DataFrame.
Args:
decimals: The number of decimals to round to.
Returns:
A new DataFrame.
"""
return DataFrame(
query_compiler=self._query_compiler.round(decimals=decimals, **kwargs)
)
def rpow(self, other, axis="columns", level=None, fill_value=None):
"""Pow this DataFrame against another DataFrame/Series/scalar.
Args:
other: The object to use to apply the pow against this.
axis: The axis to pow over.
level: The Multilevel index level to apply pow over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the Pow applied.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.rpow,
other,
axis=axis,
level=level,
fill_value=fill_value,
)
other = self._validate_other(other, axis, numeric_only=True)
# Check to make sure integers are not raised to negative integer powers
if (
is_integer_dtype(type(other))
and other < 0
and all(is_integer_dtype(t) for t in self.dtypes)
):
raise ValueError("Integers to negative integer powers are not allowed.")
new_query_compiler = self._query_compiler.rpow(
other=other, axis=axis, level=level, fill_value=fill_value
)
return self._create_dataframe_from_compiler(new_query_compiler)
def rsub(self, other, axis="columns", level=None, fill_value=None):
"""Subtract a DataFrame/Series/scalar from this DataFrame.
Args:
other: The object to use to apply the subtraction to this.
axis: The axis to apply the subtraction over.
            level: The Multilevel index level to subtract over.
fill_value: The value to fill NaNs with.
Returns:
            A new DataFrame with the subtraction applied.
"""
axis = pandas.DataFrame()._get_axis_number(axis)
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.rsub,
other,
axis=axis,
level=level,
fill_value=fill_value,
)
other = self._validate_other(other, axis, numeric_or_time_only=True)
new_query_compiler = self._query_compiler.rsub(
other=other, axis=axis, level=level, fill_value=fill_value
)
return self._create_dataframe_from_compiler(new_query_compiler)
def rtruediv(self, other, axis="columns", level=None, fill_value=None):
return self.truediv(other, axis, level, fill_value)
def sample(
self,
n=None,
frac=None,
replace=False,
weights=None,
random_state=None,
axis=None,
):
"""Returns a random sample of items from an axis of object.
Args:
n: Number of items from axis to return. Cannot be used with frac.
Default = 1 if frac = None.
frac: Fraction of axis items to return. Cannot be used with n.
replace: Sample with or without replacement. Default = False.
weights: Default 'None' results in equal probability weighting.
If passed a Series, will align with target object on index.
Index values in weights not found in sampled object will be
ignored and index values in sampled object not in weights will
be assigned weights of zero. If called on a DataFrame, will
accept the name of a column when axis = 0. Unless weights are
a Series, weights must be same length as axis being sampled.
If weights do not sum to 1, they will be normalized to sum
to 1. Missing values in the weights column will be treated as
zero. inf and -inf values not allowed.
random_state: Seed for the random number generator (if int), or
numpy RandomState object.
axis: Axis to sample. Accepts axis number or name.
Returns:
A new DataFrame.
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
if axis:
axis_labels = self.columns
axis_length = len(axis_labels)
else:
# Getting rows requires indices instead of labels. RangeIndex provides this.
axis_labels = pandas.RangeIndex(len(self.index))
axis_length = len(axis_labels)
if weights is not None:
# Index of the weights Series should correspond to the index of the
# Dataframe in order to sample
if isinstance(weights, pandas.Series):
weights = weights.reindex(self.axes[axis])
# If the weights arg is a string, the weights used for sampling will
# be the values in the column corresponding to that string
if isinstance(weights, string_types):
if axis == 0:
try:
weights = self[weights]
except KeyError:
raise KeyError("String passed to weights not a valid column")
else:
raise ValueError(
"Strings can only be passed to "
"weights when sampling from rows on "
"a DataFrame"
)
weights = pandas.Series(weights, dtype="float64")
import pytest
import numpy as np
import pandas as pd
from endaq.calc import rotation
@pytest.mark.parametrize(
'quat, euler',
[
((0., 0., 0., 1.), (0., 0., 0.)),
((0., 0., 0., -1.), (0., 0., 0.)),
((1., 0., 0., 0.), (np.pi, 0., 0.)),
((0., 1., 0., 0.), (np.pi, 0., np.pi)),
((0., 0., 1., 0.), (0., 0., np.pi)),
]
)
def test_quat_to_euler_data(quat, euler):
df = pd.DataFrame([quat], index=[0], columns=['X', 'Y', 'Z', 'W'])
target = pd.DataFrame([euler], index=[0], columns=['x', 'y', 'z'])
# data_functions.py
#!/usr/bin/env python
# coding: utf-8
# Import libraries
import logging
import pandas as pd
import numpy as np
from pathlib import Path
import argparse
import gc
from scipy import stats
# REDUCE MEMORY USAGE
def reduce_mem_usage(df, verbose=False):
start_mem = df.memory_usage().sum() / 1024 ** 2
int_columns = df.select_dtypes(include=["int"]).columns
float_columns = df.select_dtypes(include=["float"]).columns
for col in int_columns:
df[col] = pd.to_numeric(df[col], downcast="integer")
for col in float_columns:
df[col] = pd.to_numeric(df[col], downcast="float")
end_mem = df.memory_usage().sum() / 1024 ** 2
if verbose:
print(
"Mem. usage decreased to {:5.2f} Mb ({:.1f}% reduction)".format(
end_mem, 100 * (start_mem - end_mem) / start_mem
)
)
return df
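# Illustrative usage sketch (not part of the original module): shows the expected
# effect of reduce_mem_usage on a toy frame; the column names are assumptions.
def _demo_reduce_mem_usage():
    demo = pd.DataFrame({
        "small_int": np.arange(1000, dtype="int64"),                   # downcasts to int16
        "small_float": np.linspace(0.0, 1.0, 1000, dtype="float64"),   # downcasts to float32
    })
    demo = reduce_mem_usage(demo, verbose=True)
    print(demo.dtypes)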
# LOAD DATASET
def load_data(file_path, kind='csv'):
data = pd.DataFrame([])
from collections import OrderedDict
import pandas as pd
import torch
from torch import nn
from torch.autograd import Variable
def get_names_dict(model):
"""
Recursive walk over modules to get names including path.
"""
names = {}
def _get_names(module, parent_name=''):
for key, module in module.named_children():
name = parent_name + '.' + key if parent_name else key
names[name] = module
if isinstance(module, torch.nn.Module):
_get_names(module, parent_name=name)
_get_names(model)
return names
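# Illustrative usage sketch (assumption: a toy Sequential model) showing what the
# recursive walk produces; Sequential children are auto-named "0", "1", "2".
def _demo_get_names_dict():
    model = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2))
    names = get_names_dict(model)
    print(sorted(names.keys()))  # -> ['0', '1', '2']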
def summarize_model(model, input_size, return_meta=False):
"""Summarizes torch model by showing trainable parameters and weights.
Parameters:
----------
model : {nn.Module}
The model to summarize.
input_size : {tuple}
The dimensions of the model input not including batch size.
return_meta : {bool}, optional
Whether or not to return some additional meta data of the
model compute from the summary (the default is False).
Returns
-------
pd.DataFrame
The model summary as a Pandas data frame.
---------
Example:
import torchvision.models as models
model = models.alexnet()
df = summarize_model(model=model, input_size=(3, 224, 224))
print(df)
name class_name input_shape output_shape n_parameters
1 features=>0 Conv2d (-1, 3, 224, 224) (-1, 64, 55, 55) 23296
2 features=>1 ReLU (-1, 64, 55, 55) (-1, 64, 55, 55) 0
...
"""
def get_settings(m):
c = m.__class__
s = {}
# Linear layers
if c in [nn.Linear, nn.Bilinear]:
s = '-'
# Convolutional layers
if c in [nn.Conv1d, nn.Conv2d, nn.Conv3d]:
s = {'stride': m.stride, 'padding': m.padding}
if c in [nn.ConvTranspose1d, nn.ConvTranspose2d, nn.ConvTranspose3d]:
s = {'stride': m.stride, 'padding': m.padding, 'output_padding': m.output_padding}
# Pooling layers
if c in [nn.MaxPool1d, nn.MaxPool2d, nn.MaxPool3d]:
s = {'kernel_size': m.kernel_size, 'stride': m.stride, 'padding': m.padding, 'dilation': m.dilation} #, 'ceil_mode'=False}
if c in [nn.MaxUnpool1d, nn.MaxUnpool2d, nn.MaxUnpool3d]:
s = {'kernel_size': m.kernel_size, 'stride': m.stride, 'padding': m.padding}
if c in [nn.AvgPool1d, nn.AvgPool2d, nn.AvgPool3d]:
s = {'kernel_size': m.kernel_size, 'stride': m.stride, 'padding': m.padding, 'count_include_pad': m.count_include_pad}
# Padding layers
if c in [nn.ReflectionPad1d, nn.ReflectionPad2d, nn.ReplicationPad1d, nn.ReplicationPad2d, nn.ReplicationPad3d,
nn.ZeroPad2d, nn.ConstantPad1d, nn.ConstantPad2d, nn.ConstantPad3d]:
s = {'padding': m.padding}
if c in [nn.ConstantPad1d, nn.ConstantPad2d, nn.ConstantPad3d]:
s['value'] = m.value
# Recurrent layers
if c in [nn.RNN, nn.LSTM, nn.GRU, nn.RNNCell, nn.LSTMCell, nn.GRUCell]:
s = {'input_size': m.input_size, 'hidden_size': m.hidden_size,
'num_layers': m.num_layers, 'nonlinearity': m.nonlinearity,
'dropout': m.dropout, 'bidirectional': m.bidirectional,
'batch_first': m.batch_first}
# Dropout layers
if c in [nn.Dropout, nn.Dropout2d, nn.Dropout3d]:
s = {'p': m.p}
# Normalization layers
if c in [nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d]:
s = {'momentum': m.momentum, 'affine': m.affine}
# Activation functions
if c in [nn.ELU]:
s = {'alpha': m.alpha}
if c in [nn.Softplus]:
s = {'beta': m.beta, 'threshold': m.threshold}
# Embedding layers
s = s if len(s) > 0 else '-'
return s
def register_hook(module):
# Define hook
def hook(module, input, output):
name = ''
for key, item in names.items():
if item == module:
name = key
# Get class name and set module index
class_name = str(module.__class__).split('.')[-1].split("'")[0]
module_idx = len(summary)
m_key = module_idx + 1
# Prepare summary entry for this module
summary[m_key] = OrderedDict()
summary[m_key]['name'] = name
summary[m_key]['class_name'] = class_name
# Input and output shape
summary[m_key]['input_shape'] = (-1, ) + tuple(input[0].size())[1:]
summary[m_key]['output_shape'] = (-1, ) + tuple(output.size())[1:]
# Weight dimensions
summary[m_key]['weight_shapes'] = list([tuple(p.size()) for p in module.parameters()])
# Number of parameters in layers
summary[m_key]['n_parameters'] = sum([torch.LongTensor(list(p.size())).prod().item() for p in module.parameters()])
summary[m_key]['n_trainable'] = sum([torch.LongTensor(list(p.size())).prod().item() for p in module.parameters() if p.requires_grad])
# Get special settings for layers
summary[m_key]['settings'] = get_settings(module)
# Append
if not isinstance(module, nn.Sequential) and not isinstance(module, nn.ModuleList) and not (module == model):
hooks.append(module.register_forward_hook(hook))
# Put model in evaluation mode (required for some modules {BN, DO, etc.})
was_training = model.training
if model.training:
model.eval()
# Names are stored in parent and path+name is unique not the name
names = get_names_dict(model)
# Check if there are multiple inputs to the network
if isinstance(input_size[0], (list, tuple)):
x = [Variable(torch.rand(1, *in_size)) for in_size in input_size]
else:
x = Variable(torch.rand(1, *input_size))
# Move parameters to CUDA if relevant
if next(model.parameters()).is_cuda:
x = x.cuda()
# Create properties
summary = OrderedDict()
hooks = []
# Register hook on all modules of model
model.apply(register_hook)
# Make a forward pass to evaluate registered hook functions
# and build summary
model(x)
# Remove all the registered hooks from the model again and
# return it in the state it was given.
for h in hooks:
h.remove()
# If the model was in training mode, put it back into training mode
if was_training:
model.train()
# Make dataframe
df_summary = pd.DataFrame.from_dict(summary, orient='index')
# Create additional info
if return_meta:
meta = {'total_parameters': df_summary.n_parameters.sum(),
'total_trainable': df_summary.n_trainable.sum(),
'layers': df_summary.shape[0],
'trainable_layers': (df_summary.n_trainable != 0).sum()}
df_meta = pd.DataFrame.from_dict(meta, orient='index')
return df_summary, df_meta
else:
return df_summary
if __name__ == '__main__':
import torchvision.models as models
pd.set_option('display.max_colwidth', -1)
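    # Assumed continuation of the truncated __main__ block, mirroring the usage in the
    # summarize_model docstring (AlexNet and the 224x224 input size are just examples).
    model = models.alexnet()
    df = summarize_model(model=model, input_size=(3, 224, 224))
    print(df)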
import numpy as np
import pandas as pd
import uuid
METHODS = [
'add', 'sub', 'mul', 'floordiv', 'div', 'truediv', 'mod',
'divmod', 'pow', 'lshift', 'rshift', 'and', 'or', 'xor'
]
_df = pd.DataFrame()
from __future__ import unicode_literals
import es_core_news_sm
import tweepy
import numpy
import argparse
import collections
import datetime
import re
import json
import sys
import os
import spacy
import nltk
import glob
import operator
import time
import itertools
import numpy as np
import pandas as pd
from nltk.corpus import stopwords
from transformers import pipeline
from ascii_graph import Pyasciigraph
from ascii_graph.colors import Gre, Yel, Red
from ascii_graph.colordata import hcolor
from spacy.lang.es.examples import sentences
from multiprocessing import Process
from tqdm import tqdm
from functools import partial
#from watson_developer_cloud import VisualRecognitionV3
from subject_classification_spanish import subject_classifier
from spellchecker import SpellChecker
cats_classifier = subject_classifier.SubjectClassifier()
#visual_recognition = VisualRecognitionV3(
# '2018-03-19',
# iam_apikey='-')
nlp_ = es_core_news_sm.load()
stopwords_ = set(stopwords.words('spanish'))
repl = partial(re.sub, '( |\n|\t)+', ' ')
leet_alph = {'0':'o', '1':'i', '3':'e', '5':'s', '4':'a', '7':'t', '8': 'b'}
regex = "/*[a-zA-ZñÑ$]*[0134578$][a-zA-ZñÑ]*"
regex2 = "https://t.co/\w*"
regex3 = "@[a-zA-Z0-9]*"
regex4 = "#[a-zA-Z0-9]*"
spell = SpellChecker(language='es')
#prog_ = re.compile("(@[A-Za-z0-9]+)|([^0-9A-Za-z' \t])|(\w+:\/\/\S+)")
#prog2_ = re.compile(" +")
#hashtags_ = re.compile("#(\w+)")
#regex_mentions_ = re.compile("@(\w+)")
#urls_ = re.compile("http(s)?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+")
regex_bad_words_ = re.compile("(" + "|".join(pd.concat([pd.read_csv(f)
#https://bit.ly/2NyxdAG
from bs4 import BeautifulSoup
import requests
import pandas as pd
url = 'https://www.imdb.com/search/title/?title_type=feature&primary_language=te&sort=num_votes,desc&view=advanced&start=%s'
movies = []
start = 1
index = 1
def add_movies_from_url(url):
global movies, start, index
response = requests.get(url)
soup = BeautifulSoup(response.text, 'lxml')
movies_list = soup.find("div", class_="lister-list")
movies_list = movies_list.find_all("div", class_="lister-item-content")
for movie in movies_list:
name, director, actors, popularity, year, rating = None, None, [], None, None, None
try:
popularity = index
try:
year = movie.find_all('span')[1].text[1:-1]
except:
pass
try:
rating = movie.find('strong').text
except:
pass
try:
name = movie.find('a').text
except:
pass
try:
people = movie.find_all('p')[2].find_all('a')
except:
pass
try:
director = people[0].text
except:
pass
actors = [person.text for person in people[1:]] if len(people)>1 else []
except Exception as e:
print('Failed for - ', index, name)
movies.append([popularity, year, rating, name, director, actors])
index+=1
print('Done for the url - ', url)
for i in range(100):
add_movies_from_url(url%start)
start+=50
columns = ["popularity", "year", "rating", "name", "director", "actors"]
df = pd.DataFrame(movies, columns=columns)
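# Illustrative next step (not in the original script): persist the scraped table;
# the output filename is an assumption.
df.to_csv('imdb_telugu_movies.csv', index=False)
print('Saved', len(df), 'movies')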
from datetime import datetime, timedelta, date
import pandas as pd
import time
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import linregress
import os.path
from tiingo import TiingoClient
import sys
import urllib.request as request
import csv
#from urllib.parse import urlencode
#%matplotlib inline
#plt.rcParams["figure.figsize"] = (10, 6) # (w, h)
#plt.ioff()
#import pandas_datareader.data as web
#from pandas_datareader import data, wb
#import pandas.io.data as web1 # Package and modules for importing data; this code may change depending on pandas version
#import requests
#from bs4 import BeautifulSoup
#import json
#import re
def tiingo_data(ticker, start, end):
config = {}
# To reuse the same HTTP Session across API calls (and have better performance), include a session key.
config['session'] = True
# Obtain Tiingo API Key
myFile = open("/Users/brittanythomas/PycharmProjects/SystemTraderV2/tiingoAPIkey.txt", "r")
myAPIkey = myFile.readline()
myFile.close()
config['api_key'] = myAPIkey
# Initialize
client = TiingoClient(config)
try:
print('Trying to pull Tiingo data for '+ticker)
df = client.get_dataframe(ticker,
startDate=start,
endDate=end,
frequency='daily')
except:
print("Unexpected error:", sys.exc_info()[0])
time.sleep(1)
try:
print('AGAIN - Trying to pull Tiingo data for ' + ticker)
df = client.get_dataframe(ticker,
startDate=start,
endDate=end,
frequency='daily')
except:
print('Could not pull Tiingo data for ' + ticker)
print("Unexpected error:", sys.exc_info()[0])
return None
return df
def momentum(closes):
returns = np.log(closes)
x = np.arange(len(returns))
slope, _, rvalue, _, _ = linregress(x, returns)
return ((1 + slope) ** 252) * (rvalue ** 2) # annualize slope and multiply by R^2
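# Illustrative worked example (assumption: a perfectly exponential price path):
# for p_t = 100 * exp(0.001 * t), the log-price regression slope is 0.001 and R^2 is 1,
# so momentum() returns 1.001 ** 252, roughly 1.286.
def _demo_momentum():
    closes = 100.0 * np.exp(0.001 * np.arange(252))
    print(momentum(closes))  # ~1.286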
def main():
#Define system file path for where you have stored the constituents.csv and WIKI_PRICES.csv
sysFiles1 = '/Users/brittanythomas/PycharmProjects/SystemTraderV2/'
#Define system file path where momentum results should be stored
sysFiles2='/Users/brittanythomas/Library/Application Support/JetBrains/PyCharmCE2020.1/scratches/datas/'
#Open a txt file to save troubleshooting data as necessary
troubleshootFile1 = open(sysFiles1+'troubleshooting.txt','w+')
troubleshootFile1.write('test'+'\n')
skiped =[]
mydateparser = lambda x: datetime.strptime(x, "%Y-%m-%d")
constituents = pd.read_csv(sysFiles1 + 'constituents.csv', header=0,
names=['date_column', 'tickers'], parse_dates=['date_column'], date_parser=mydateparser)
#create a list of constituents from constituent.csv dataframe
conList = set([a for b in constituents.tickers.str.strip('[]').str.split(',') for a in b])
conList = [s.replace("'",'') for s in conList] #remove quotes from strings in list
conList = [s.strip() for s in conList]
conList = list(dict.fromkeys(conList)) #remove duplicates by converting conList into a dictionary and then back again
conList = sorted(conList) #sort conList alphabetically
start = "2015-10-01"
end = "2020-09-01"
momentums = pd.DataFrame(columns=['ticker','momentum'])
i=0
for ticker in conList:
if os.path.isfile(sysFiles2 + ticker + '.csv'):
try:
print(ticker + ' momentum exists - Adding to dictionary')
df = pd.read_csv(sysFiles2+ticker+'.csv',header='infer', sep=' ')
import numpy as np
import pytest
from pandas.compat import range, u, zip
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series
import pandas.core.common as com
from pandas.core.indexing import IndexingError
from pandas.util import testing as tm
@pytest.fixture
def frame_random_data_integer_multi_index():
levels = [[0, 1], [0, 1, 2]]
codes = [[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]]
index = MultiIndex(levels=levels, codes=codes)
return DataFrame(np.random.randn(6, 2), index=index)
@pytest.fixture
def dataframe_with_duplicate_index():
"""Fixture for DataFrame used in tests for gh-4145 and gh-4146"""
data = [['a', 'd', 'e', 'c', 'f', 'b'],
[1, 4, 5, 3, 6, 2],
[1, 4, 5, 3, 6, 2]]
index = ['h1', 'h3', 'h5']
columns = MultiIndex(
levels=[['A', 'B'], ['A1', 'A2', 'B1', 'B2']],
codes=[[0, 0, 0, 1, 1, 1], [0, 3, 3, 0, 1, 2]],
names=['main', 'sub'])
return DataFrame(data, index=index, columns=columns)
@pytest.mark.parametrize('access_method', [lambda s, x: s[:, x],
lambda s, x: s.loc[:, x],
lambda s, x: s.xs(x, level=1)])
@pytest.mark.parametrize('level1_value, expected', [
(0, Series([1], index=[0])),
(1, Series([2, 3], index=[1, 2]))
])
def test_series_getitem_multiindex(access_method, level1_value, expected):
# GH 6018
# series regression getitem with a multi-index
s = Series([1, 2, 3])
s.index = MultiIndex.from_tuples([(0, 0), (1, 1), (2, 1)])
result = access_method(s, level1_value)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('level0_value', ['D', 'A'])
def test_getitem_duplicates_multiindex(level0_value):
# GH 5725 the 'A' happens to be a valid Timestamp so the doesn't raise
# the appropriate error, only in PY3 of course!
index = MultiIndex(levels=[[level0_value, 'B', 'C'],
[0, 26, 27, 37, 57, 67, 75, 82]],
codes=[[0, 0, 0, 1, 2, 2, 2, 2, 2, 2],
[1, 3, 4, 6, 0, 2, 2, 3, 5, 7]],
names=['tag', 'day'])
arr = np.random.randn(len(index), 1)
df = DataFrame(arr, index=index, columns=['val'])
# confirm indexing on missing value raises KeyError
if level0_value != 'A':
msg = "'A'"
with pytest.raises(KeyError, match=msg):
df.val['A']
msg = "'X'"
with pytest.raises(KeyError, match=msg):
df.val['X']
result = df.val[level0_value]
expected = Series(arr.ravel()[0:3], name='val', index=Index(
[26, 37, 57], name='day'))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('indexer, is_level1, expected_error', [
([], False, None), # empty ok
(['A'], False, None),
(['A', 'D'], False, None),
(['D'], False, r"\['D'\] not in index"), # not any values found
(pd.IndexSlice[:, ['foo']], True, None),
(pd.IndexSlice[:, ['foo', 'bah']], True, None)
])
def test_getitem_duplicates_multiindex_missing_indexers(indexer, is_level1,
expected_error):
# GH 7866
# multi-index slicing with missing indexers
idx = MultiIndex.from_product([['A', 'B', 'C'],
['foo', 'bar', 'baz']],
names=['one', 'two'])
s = Series(np.arange(9, dtype='int64'), index=idx).sort_index()
if indexer == []:
expected = s.iloc[[]]
elif is_level1:
expected = Series([0, 3, 6], index=MultiIndex.from_product(
[['A', 'B', 'C'], ['foo']], names=['one', 'two'])).sort_index()
else:
exp_idx = MultiIndex.from_product([['A'], ['foo', 'bar', 'baz']],
names=['one', 'two'])
expected = Series(np.arange(3, dtype='int64'),
index=exp_idx).sort_index()
if expected_error is not None:
with pytest.raises(KeyError, match=expected_error):
s.loc[indexer]
else:
result = s.loc[indexer]
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('columns_indexer', [
([], slice(None)),
(['foo'], [])
])
def test_getitem_duplicates_multiindex_empty_indexer(columns_indexer):
# GH 8737
# empty indexer
multi_index = MultiIndex.from_product((['foo', 'bar', 'baz'],
['alpha', 'beta']))
df = DataFrame(np.random.randn(5, 6), index=range(5), columns=multi_index)
df = df.sort_index(level=0, axis=1)
expected = DataFrame(index=range(5), columns=multi_index.reindex([])[0])
result = df.loc[:, columns_indexer]
tm.assert_frame_equal(result, expected)
def test_getitem_duplicates_multiindex_non_scalar_type_object():
# regression from < 0.14.0
# GH 7914
df = DataFrame([[np.mean, np.median], ['mean', 'median']],
columns=MultiIndex.from_tuples([('functs', 'mean'),
('functs', 'median')]),
index=['function', 'name'])
result = df.loc['function', ('functs', 'mean')]
expected = np.mean
assert result == expected
def test_getitem_simple(multiindex_dataframe_random_data):
df = multiindex_dataframe_random_data.T
expected = df.values[:, 0]
result = df['foo', 'one'].values
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize('indexer,msg', [
(lambda df: df[('foo', 'four')], r"\('foo', 'four'\)"),
(lambda df: df['foobar'], "'foobar'")
])
def test_getitem_simple_key_error(
multiindex_dataframe_random_data, indexer, msg):
df = multiindex_dataframe_random_data.T
with pytest.raises(KeyError, match=msg):
indexer(df)
@pytest.mark.parametrize('indexer', [
lambda s: s[2000, 3],
lambda s: s.loc[2000, 3]
])
def test_series_getitem(
multiindex_year_month_day_dataframe_random_data, indexer):
s = multiindex_year_month_day_dataframe_random_data['A']
expected = s.reindex(s.index[42:65])
expected.index = expected.index.droplevel(0).droplevel(0)
result = indexer(s)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('indexer', [
lambda s: s[2000, 3, 10],
lambda s: s.loc[2000, 3, 10]
])
def test_series_getitem_returns_scalar(
multiindex_year_month_day_dataframe_random_data, indexer):
s = multiindex_year_month_day_dataframe_random_data['A']
expected = s.iloc[49]
result = indexer(s)
assert result == expected
@pytest.mark.filterwarnings("ignore:\\n.ix:DeprecationWarning")
@pytest.mark.parametrize('indexer', [
lambda s: s.loc[[(2000, 3, 10), (2000, 3, 13)]],
lambda s: s.ix[[(2000, 3, 10), (2000, 3, 13)]]
])
def test_series_getitem_fancy(
multiindex_year_month_day_dataframe_random_data, indexer):
s = multiindex_year_month_day_dataframe_random_data['A']
expected = s.reindex(s.index[49:51])
result = indexer(s)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('indexer,error,msg', [
(lambda s: s.__getitem__((2000, 3, 4)), KeyError, '356'),
(lambda s: s[(2000, 3, 4)], KeyError, '356'),
(lambda s: s.loc[(2000, 3, 4)], IndexingError, 'Too many indexers'),
(lambda s: s.__getitem__(len(s)), IndexError, 'index out of bounds'),
(lambda s: s[len(s)], IndexError, 'index out of bounds'),
(lambda s: s.iloc[len(s)], IndexError,
'single positional indexer is out-of-bounds')
])
def test_series_getitem_indexing_errors(
multiindex_year_month_day_dataframe_random_data, indexer, error, msg):
s = multiindex_year_month_day_dataframe_random_data['A']
with pytest.raises(error, match=msg):
indexer(s)
def test_series_getitem_corner_generator(
multiindex_year_month_day_dataframe_random_data):
s = multiindex_year_month_day_dataframe_random_data['A']
result = s[(x > 0 for x in s)]
expected = s[s > 0]
tm.assert_series_equal(result, expected)
def test_frame_getitem_multicolumn_empty_level():
df = DataFrame({'a': ['1', '2', '3'], 'b': ['2', '3', '4']})
df.columns = [['level1 item1', 'level1 item2'], ['', 'level2 item2'],
['level3 item1', 'level3 item2']]
result = df['level1 item1']
expected = DataFrame([['1'], ['2'], ['3']], index=df.index,
columns=['level3 item1'])
tm.assert_frame_equal(result, expected)
def test_getitem_tuple_plus_slice():
# GH 671
df = DataFrame({'a': np.arange(10),
'b': np.arange(10),
'c': np.random.randn(10),
'd': np.random.randn(10)}
).set_index(['a', 'b'])
expected = df.loc[0, 0]
result = df.loc[(0, 0), :]
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('indexer,expected_slice', [
(lambda df: df['foo'], slice(3)),
(lambda df: df['bar'], slice(3, 5)),
(lambda df: df.loc[:, 'bar'], slice(3, 5))
])
def test_getitem_toplevel(
multiindex_dataframe_random_data, indexer, expected_slice):
df = multiindex_dataframe_random_data.T
expected = df.reindex(columns=df.columns[expected_slice])
expected.columns = expected.columns.droplevel(0)
result = indexer(df)
tm.assert_frame_equal(result, expected)
def test_getitem_int(frame_random_data_integer_multi_index):
df = frame_random_data_integer_multi_index
result = df.loc[1]
expected = df[-3:]
expected.index = expected.index.droplevel(0)
tm.assert_frame_equal(result, expected)
def test_getitem_int_raises_exception(frame_random_data_integer_multi_index):
df = frame_random_data_integer_multi_index
msg = "3"
with pytest.raises(KeyError, match=msg):
df.loc.__getitem__(3)
def test_getitem_iloc(multiindex_dataframe_random_data):
df = multiindex_dataframe_random_data
result = df.iloc[2]
expected = df.xs(df.index[2])
tm.assert_series_equal(result, expected)
def test_frame_setitem_view_direct(multiindex_dataframe_random_data):
# this works because we are modifying the underlying array
# really a no-no
df = multiindex_dataframe_random_data.T
df['foo'].values[:] = 0
assert (df['foo'].values == 0).all()
def test_frame_setitem_copy_raises(multiindex_dataframe_random_data):
# will raise/warn as its chained assignment
df = multiindex_dataframe_random_data.T
msg = "A value is trying to be set on a copy of a slice from a DataFrame"
with pytest.raises(com.SettingWithCopyError, match=msg):
df['foo']['one'] = 2
def test_frame_setitem_copy_no_write(multiindex_dataframe_random_data):
frame = multiindex_dataframe_random_data.T
expected = frame
df = frame.copy()
msg = "A value is trying to be set on a copy of a slice from a DataFrame"
with pytest.raises(com.SettingWithCopyError, match=msg):
df['foo']['one'] = 2
result = df
tm.assert_frame_equal(result, expected)
def test_getitem_lowerdim_corner(multiindex_dataframe_random_data):
df = multiindex_dataframe_random_data
# test setup - check key not in dataframe
with pytest.raises(KeyError, match="11"):
df.loc[('bar', 'three'), 'B']
# in theory should be inserting in a sorted space????
df.loc[('bar', 'three'), 'B'] = 0
expected = 0
result = df.sort_index().loc[('bar', 'three'), 'B']
assert result == expected
@pytest.mark.parametrize('unicode_strings', [True, False])
def test_mixed_depth_get(unicode_strings):
# If unicode_strings is True, the column labels in dataframe
# construction will use unicode strings in Python 2 (pull request
# #17099).
arrays = [['a', 'top', 'top', 'routine1', 'routine1', 'routine2'],
['', 'OD', 'OD', 'result1', 'result2', 'result1'],
['', 'wx', 'wy', '', '', '']]
if unicode_strings:
arrays = [[u(s) for s in arr] for arr in arrays]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(np.random.randn(4, 6), columns=index)
result = df['a']
expected = df['a', '', ''].rename('a')
tm.assert_series_equal(result, expected)
result = df['routine1', 'result1']
expected = df['routine1', 'result1', '']
expected = expected.rename(('routine1', 'result1'))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('indexer', [
lambda df: df.loc[:, ('A', 'A1')],
lambda df: df[('A', 'A1')]
])
def test_mi_access(dataframe_with_duplicate_index, indexer):
# GH 4145
df = dataframe_with_duplicate_index
index = Index(['h1', 'h3', 'h5'])
columns = MultiIndex.from_tuples([('A', 'A1')], names=['main', 'sub'])
expected = DataFrame([['a', 1, 1]], index=columns, columns=index).T
result = indexer(df)
tm.assert_frame_equal(result, expected)
def test_mi_access_returns_series(dataframe_with_duplicate_index):
# GH 4146, not returning a block manager when selecting a unique index
# from a duplicate index
# as of 4879, this returns a Series (which is similar to what happens
# with a non-unique)
df = dataframe_with_duplicate_index
expected = Series(['a', 1, 1], index=['h1', 'h3', 'h5'], name='A1')
result = df['A']['A1']
tm.assert_series_equal(result, expected)
def test_mi_access_returns_frame(dataframe_with_duplicate_index):
# selecting a non_unique from the 2nd level
df = dataframe_with_duplicate_index
expected = DataFrame([['d', 4, 4], ['e', 5, 5]],
index=Index(['B2', 'B2'], name='sub'),
columns=['h1', 'h3', 'h5'], ).T
result = df['A']['B2']
tm.assert_frame_equal(result, expected)
"""
ADE20K Dataset, old version with scene subdirs
STEP 1 of the dataset preprocessing:
make a list of all input image and annotation files and save it as a .csv file
"""
import os
import scipy.io
import numpy as np
import pandas as pd
rootdir = "/media/sveta/DATASTORE/AI_ML_DL/Datasets/4_ADE20K/ADE20K_2016_07_26/"
out_anno_subdir = "anno_meta"
out_anno_csv = "anno_filelist.csv"
img_sets = {"images/training": "train", "images/validation": "val"}
seg_suffix = "_seg.png"
columns = ['set', 'subdirs', 'filename_img', 'filename_seg', 'train']
data_list = []
L_iss = len(img_sets)
for i_iss, img_set_subdir in enumerate(img_sets):
set_subdir = os.path.join(rootdir, img_set_subdir)
file_groups = list(os.walk(set_subdir))
L_fg = len(file_groups)
for i_fg, file_group in enumerate(file_groups):
print("{}/{} \t {}/{} \t processing.. ".format(i_iss+1, L_iss, i_fg+1, L_fg), end='')
if len(file_group[1]) > 0:
print('skipped')
continue
subdir = os.path.relpath(file_group[0], set_subdir)
for f in file_group[2]:
img_name, img_ext = os.path.splitext(f)
if img_ext != '.jpg':
continue
f_seg = img_name + seg_suffix
if f_seg in file_group[2]:
data_list.append([img_set_subdir, subdir, f, f_seg, img_sets[img_set_subdir]=="train"])
print('done')
data_list = pd.DataFrame(data_list, columns=columns)
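# Assumed completion of the stated goal ("save it as a .csv file"); the output path
# is an assumption built from the out_anno_* names defined above.
out_dir = os.path.join(rootdir, out_anno_subdir)
os.makedirs(out_dir, exist_ok=True)
data_list.to_csv(os.path.join(out_dir, out_anno_csv), index=False)
print("Saved {} rows to {}".format(len(data_list), os.path.join(out_dir, out_anno_csv)))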
import pandas as pd
import numpy as np
import sys
import csv
from time import *
from datetime import *
from io import StringIO
import bootstrapping
import BS
import dataSetConstruction
def rmse(a,b):
return np.sqrt(np.nanmean(np.square(a-b)))
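# e.g. rmse(np.array([1.0, 2.0]), np.array([1.0, 3.0])) == sqrt(0.5) ~= 0.707; NaNs are ignored via nanmean.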
def selectIndex(df, indexToKeep):
return df.loc[indexToKeep][ ~df.loc[indexToKeep].index.duplicated(keep='first') ]
def removeDuplicateIndex(df):
return selectIndex(df, df.index)
################################################################################ Parsing dat files
def parseDatFile(fileName):
s = open(fileName).read()
defPos=s.find("[option]")
finPos=s.find("[dividend]")
df = pd.read_csv(StringIO(s[defPos:finPos].replace("\n\n",";").replace("\n",",").replace(";",";\n")),decimal=".", sep=",", header=None)
matC = pd.to_numeric(df[1].str.split(pat="= ", expand=True)[1]).round(3)
strikeC = pd.to_numeric(df[3].str.split(pat="= ", expand=True)[1]).round()
priceC = pd.to_numeric(df[4].str.replace(";","").str.split(pat="= ", expand=True)[1])
typeC = pd.to_numeric(df[2].str.split(pat="= ", expand=True)[1])
formattedDat = pd.DataFrame([matC, strikeC, priceC, typeC], index = ["Maturity", "Strike", "Price", "Type"]).transpose().astype({"Type":"int32"})
filteredDat = formattedDat[formattedDat["Type"]==2]
return filteredDat
def parseModelParamDatFile(fileName):
s = open(fileName).read()
parts = s.split("\n\n")
number1 = parts[0]
repo = parts[1]
dates = parts[2]
interestRates = parts[3]
dividendRates = parts[4]
number2 = parts[5]
number3 = parts[6]
n = parts[7]
sigmaRef = parts[8]
h = parts[9]
sigmaMax = parts[10]
sigmaMin = parts[11]
number4 = parts[12]
underlying = parts[13]
def splitRow(row):
return np.array(row.split("\t")).astype(np.float)
tree = ("\n".join(parts[14:])).split("\n")
tree.remove("")
formattedTree = np.reshape(np.array(list(map(splitRow, tree))), (-1,3))
return pd.DataFrame(formattedTree, columns = ["date", "stock(%)", "vol"])
def parseImpliedVolDatFile(fileName):
s = open(fileName).read()
parts = s.split("\n\n")
def splitRow(row):
return np.array(row.split("\t")).astype(np.float)
testGrid = ("\n".join(parts)).split("\n")
testGrid.remove("")
formattedTestGrid = np.reshape(np.array(list(map(splitRow, testGrid))), (-1,4))
return pd.DataFrame(formattedTestGrid, columns=["Strike","Maturity","Implied vol.","Option price"])
def parseCalibrOutDatFile(fileName):
s = open(fileName).read()
parts = s.split("\n")
def splitRow(row):
return np.array(row.split("\t"))
def filterRow(row):
return len(row)==10
def formatRow(row):
return row.astype(np.float)
#tree = ("\n".join(parts)).split("\n")
#tree.remove("")
filteredTrainingData = list(filter(filterRow ,
list(map(splitRow, parts))))
formattedTrainingData = np.array(list(map(formatRow, filteredTrainingData)))
colNames = ["Active", "Option\ntype", "Maturity", "Strike", "Moneyness",
"Option\nprice", "Implied\nvol.", "Calibrated\nvol.","Market vol. -\nCalibrated vol.","MarketPrice"]
dfTrainingData = pd.DataFrame(formattedTrainingData, columns = colNames)
dfTrainingData["Active"] = dfTrainingData["Active"].astype(np.int)
dfTrainingData["Option\ntype"] = dfTrainingData["Option\ntype"].astype(np.int)
return dfTrainingData
def parseDatFiles(fileName):
s = open(fileName).read()
posUnderlying = s.find("[underlying]")
posZeroCoupon = s.find("[zero_coupon]")
posOption = s.find("[option]")
posDividend = s.find("[dividend]")
underlyingString = s[posUnderlying:posZeroCoupon]
zeroCouponString = s[posZeroCoupon:posOption]
optionString = s[posOption:posDividend]
dividendString = s[posDividend:-2]
def extractData(subStr, tag):
parts = subStr.replace(tag + "\n", "").split("\n\n")
try :
parts.remove("")
except ValueError:
#Not found, we continue
pass
def parseRow(row):
return (int(row.split(" = ")[1]) if (row.split(" = ")[0] == "type") else float(row.split(" = ")[1]))
def splitRow(row):
table = np.array(row.split("\n"))
parseTable = np.array(list(map(parseRow, table)))
return np.reshape(parseTable, (-1))
return np.array(list(map(splitRow, parts)))
underlying = pd.DataFrame(extractData(underlyingString, "[underlying]"),
columns=["S","Repo"])
zeroCoupon = pd.DataFrame(extractData(zeroCouponString, "[zero_coupon] "),
columns=["Maturity","Price"])
option = pd.DataFrame(extractData(optionString, "[option] "),
columns=["Maturity","Type", "Price", "Strike"])
option["Type"] = option["Type"].astype(np.int)
dividend = pd.DataFrame(extractData(dividendString, "[dividend] "),
columns=["Maturity","Amount"])
return underlying, zeroCoupon, dividend, option
###################################################################### data processing
def cleanData(zeroCouponCurve,
dividendCurve,
trainingData,
testingData,
underlyingNative,
localVolatilityNative):
dividendDf = dividendCurve.set_index('Maturity').sort_index()
dividendDf.loc[1.0] = 0.0
dividendDf.sort_index(inplace=True)
# Format zero coupon curve as a Pandas series
rateCurveDf = zeroCouponCurve.set_index('Maturity').sort_index()
# keep only rates expriring before 1 year
rateCurveDf = rateCurveDf.loc[rateCurveDf.index <= 1.01]
localVolatility = localVolatilityNative.dropna()
strikeCol = np.multiply(localVolatility["stock(%)"],
underlyingNative).copy()
localVolatility.insert(0, "Strike", strikeCol)
roundedDate = localVolatility["date"].round(decimals=3)
localVolatility = localVolatility.copy()
localVolatility.loc[:, "date"] = roundedDate
renameDict = {"date": "Maturity",
"vol": "LocalVolatility",
"stock(%)": "StrikePercentage"}
localVolatility = localVolatility.rename(columns=renameDict).set_index(["Strike", "Maturity"])
# Treatment for testing data
filteredTestingData = testingData[(testingData["Implied vol."] > 0) & (testingData["Option price"] > 0)]
filteredTestingData = filteredTestingData.copy()
filteredTestingData.loc[:, "Maturity"] = filteredTestingData["Maturity"].round(decimals=3)
filteredTestingData.insert(0, "OptionType", np.ones_like(filteredTestingData["Maturity"]))
renameDict = {"Implied vol.": "ImpliedVol",
"Option price": "Price",
"Implied delta": "ImpliedDelta",
"Implied gamma": "ImpliedGamma",
"Implied theta": "ImpliedTheta",
"Local delta": "LocalDelta",
"Local gamma": "LocalGamma"}
formattedTestingData = filteredTestingData.rename(columns=renameDict).set_index(["Strike", "Maturity"])[
"ImpliedVol"]
# Treatment for testing data
filteredTrainingData = trainingData[(trainingData["Calibrated\nvol."] > 0) & (trainingData["Option\nprice"] > 0) & (
trainingData["Option\ntype"] == 2)]
roundedMat = filteredTrainingData["Maturity"].round(decimals=3)
filteredTrainingData = filteredTrainingData.copy()
filteredTrainingData.loc[:, "Maturity"] = roundedMat
renameDict = {"Option\ntype": "OptionType",
"Option\nprice": "Price",
"Calibrated\nvol.": "ImpliedVol", # "LocalImpliedVol",
"Implied\nvol.": "LocalImpliedVol"} # "ImpliedVol"}
formattedTrainingData = filteredTrainingData.drop(["Active", "Market vol. -\nCalibrated vol."], axis=1).rename(
columns=renameDict).set_index(["Strike", "Maturity"])
return dividendDf, rateCurveDf, localVolatility, removeDuplicateIndex(formattedTestingData), removeDuplicateIndex(formattedTrainingData)
def selectTrainingSet(rawData):
maturities = np.sort(np.unique(rawData.index.get_level_values("Maturity").values))
maxTrainingMaturities = maturities[-1] #maturities[-3]
filteredData = rawData[rawData.index.get_level_values("Maturity") <= maxTrainingMaturities]
trainingPercentage = 0.5
nbTrainingObs = int(rawData.shape[0] * trainingPercentage)
sampledPercentage = nbTrainingObs / filteredData.shape[0]
selectedTrainingRow = []
for maturityDf in filteredData.groupby(level="Maturity") :
if maturityDf[1].shape[0] > 1 :
nbPointsToDraw = max(int(maturityDf[1].shape[0] * sampledPercentage), 2)
selectedTrainingRow.append(maturityDf[1].sample(n=nbPointsToDraw, axis=0))
elif (maturityDf[0] == filteredData["Maturity"].min()) or (maturityDf[0] == filteredData["Maturity"].max()): #Keep all data
selectedTrainingRow.append(maturityDf[1])
#trainingSet = filteredData.sample(n=nbTrainingObs, axis=0)
trainingSet = filteredData.loc[pd.concat(selectedTrainingRow).index]
testingSet = rawData.drop(trainingSet.index)
return trainingSet.sort_index(), testingSet.sort_index()
###################################################################### Main functions
def loadDataFromCSV(pathFolder, datFile):
zeroCouponCurve = pd.read_csv(pathFolder + "discount.csv",decimal=".").apply(pd.to_numeric)
dividendCurve = pd.read_csv(pathFolder + "dividend.csv",decimal=".").apply(pd.to_numeric)
trainingData = pd.read_csv(pathFolder + "dataTrain.csv",decimal=".").apply(pd.to_numeric)
testingData = pd.read_csv(pathFolder + "dataTest.csv",decimal=".").apply(pd.to_numeric)
underlyingNative = pd.read_csv(pathFolder + "underlying.csv",decimal=".").apply(pd.to_numeric)
localVolatilityNative = pd.read_csv(pathFolder + "locvol.csv",decimal=".").apply(pd.to_numeric)
filteredDat = parseDatFile(pathFolder + datFile + ".dat")
#return zeroCouponCurve, dividendCurve, trainingData, testingData, underlyingNative["S"].values[0], localVolatilityNative, filteredDat
S0 = underlyingNative["S"].values[0]
dividendDf, rateCurveDf, localVolatility, formattedTestingData, formattedTrainingData = cleanData(zeroCouponCurve,
dividendCurve,
trainingData,
testingData,
S0,
localVolatilityNative)
bootstrap = bootstrapping.bootstrapping(rateCurveDf, dividendDf, S0)
#return dividendDf, rateCurveDf, localVolatility, formattedTestingData, formattedTrainingData, S0, bootstrap
testingDataSet = dataSetConstruction.generateData(formattedTestingData,
S0,
bootstrap,
localVolatility)
#trainingDataSet = dataSetConstruction.generateData(formattedTrainingData["ImpliedVol"],
# S0,
# bootstrap,
# localVolatility,
# priceDf=filteredDat)
trainingDataSet = dataSetConstruction.generateData(formattedTrainingData["ImpliedVol"],
S0,
bootstrap,
localVolatility,
priceDf=formattedTrainingData.reset_index())
return trainingDataSet, testingDataSet, bootstrap, S0
def loadDataFromDat(pathFolder, datFileName):
localVolatilityNative = parseModelParamDatFile(pathFolder + datFileName + ".dat.modelparam.dat")
testingData = parseImpliedVolDatFile(pathFolder + datFileName + ".dat.impliedvol.dat")
trainingData = parseCalibrOutDatFile(pathFolder + datFileName + ".dat.calibr.out.dat")
underlyingNative, zeroCouponCurve, dividendCurve, filteredDat = parseDatFiles(pathFolder + datFileName + ".dat")
repricingError = rmse(trainingData["Option\nprice"], trainingData["MarketPrice"])
print("Tikhonov PDE repricing error on training set : ", repricingError)
#return zeroCouponCurve, dividendCurve, trainingData, testingData, underlyingNative["S"].values[0], localVolatilityNative, filteredDat
S0 = underlyingNative["S"].values[0]
dividendDf, rateCurveDf, localVolatility, formattedTestingData, formattedTrainingData = cleanData(zeroCouponCurve,
dividendCurve,
trainingData,
testingData,
S0,
localVolatilityNative)
bootstrap = bootstrapping.bootstrapping(rateCurveDf, dividendDf, S0)
#return dividendDf, rateCurveDf, localVolatility, formattedTestingData, formattedTrainingData, S0, bootstrap
testingDataSet = dataSetConstruction.generateData(formattedTestingData,
S0,
bootstrap,
localVolatility)
#trainingDataSet = dataSetConstruction.generateData(formattedTrainingData["ImpliedVol"],
# S0,
# bootstrap,
# localVolatility,
# priceDf=filteredDat)
trainingDataSet = dataSetConstruction.generateData(formattedTrainingData["ImpliedVol"],
S0,
bootstrap,
localVolatility,
priceDf=formattedTrainingData.reset_index())
return trainingDataSet, testingDataSet, bootstrap, S0
def loadCBOTData(pathFolder, fileName, asOfDate):
sheetName = "quotedata"
aDf = pd.read_excel(pathFolder + fileName,
header=None,
sheet_name = sheetName)
S0 = float(aDf.iloc[0, 1])
aDf = pd.read_excel(pathFolder + fileName,
header=2,
sheet_name = sheetName)
# aDf = aDf[aDf["Converted Expiration Date"] >= asOfDate]
def formatDate(date):
if isinstance(date, str):
return pd.to_datetime(date, format='%m/%d/%Y')
return pd.Timestamp(date.year, date.day, date.month)
aDf["Converted Expiration Date"] = aDf["Expiration Date"].map(lambda x: formatDate(x))
aDf = aDf[(aDf["Vol"] > 0.1) & (aDf["Last Sale"] > 0.01) & (aDf["Last Sale.1"] > 0.01) & (aDf["IV.1"] > 0.001) & (aDf["IV"] > 0.001)]
closeDate = aDf["Converted Expiration Date"]
strike = aDf["Strike"].round(decimals=3)
maturity = (aDf["Converted Expiration Date"] - pd.Timestamp(asOfDate)).map(lambda x: x.days / 365.25).round(decimals=3)
closeCall = (aDf["Bid"] + aDf["Ask"]) / 2 # aDf["Last Sale"]
bidCall = aDf["Bid"]
askCall = aDf["Ask"]
impliedVolCall = aDf["IV"]
deltaCall = aDf["Delta"]
gammaCall = aDf["Gamma"]
impliedVolPut = aDf["IV.1"]
closePut = (aDf["Bid.1"] + aDf["Ask.1"]) / 2 # aDf["Last Sale.1"]
bidPut = aDf["Bid.1"]
askPut = aDf["Ask.1"]
deltaPut = aDf["Delta.1"]
gammaPut = aDf["Gamma.1"]
callDf = pd.DataFrame(np.vstack(
[strike, maturity, closeCall, bidCall, askCall, impliedVolCall, deltaCall, gammaCall,
np.ones_like(deltaCall, dtype=np.int32)]).T,
columns=["Strike", "Maturity", "Price", "Bid", "Ask", "ImpliedVol", "Delta", "Gamma",
"OptionType"]).set_index(["Strike", "Maturity"])
PutDf = pd.DataFrame(np.vstack([strike, maturity, closePut, bidPut, askPut, impliedVolPut, deltaPut, gammaPut,
2 * np.ones_like(deltaCall, dtype=np.int32)]).T,
columns=["Strike", "Maturity", "Price", "Bid", "Ask", "ImpliedVol", "Delta", "Gamma",
"OptionType"]).set_index(["Strike", "Maturity"])
bootstrap = bootstrapping.bootstrappingAveraged(pathFolder + "yieldCurve.dat",
S0,
strike,
asOfDate,
closeCall,
closePut,
maturity)
rawData = pd.concat([callDf, PutDf])
rawData = rawData[rawData["OptionType"]==2]
#impvol = BS.vectorizedImpliedVolatilityCalibration(S0,
# bootstrap,
# rawData.index.get_level_values("Maturity"),
# rawData.index.get_level_values("Strike"),
# rawData["OptionType"],
# rawData["Price"])
#rawData["ImpliedVol"] = impvol
filteredData = removeDataViolatingStaticArbitrage(rawData.reset_index())
rawData = removeDuplicateIndex(filteredData.set_index(["Strike", "Maturity"]))
trainingSet, testingSet = selectTrainingSet(rawData)
trainingDataSet = dataSetConstruction.generateData(trainingSet["ImpliedVol"],
S0,
bootstrap,
localVolatilityRef = None,
priceDf=trainingSet.reset_index(),
spotValue = False)
testingDataSet = dataSetConstruction.generateData(testingSet["ImpliedVol"],
S0,
bootstrap,
localVolatilityRef = None,
priceDf=testingSet.reset_index(),
spotValue = False)
return trainingDataSet, testingDataSet, bootstrap, S0
def loadESXData(pathFolder, fileName, asOfDate):
#Stock
S0 = pd.read_excel(pathFolder + fileName,
header=0,
sheet_name="Underlying_spot")["S0"].values[0]
#Raw data
rawData = pd.read_excel(pathFolder + fileName,
header=0,
sheet_name="Put_Call_ask_price_all")
rawData["Strike"] = rawData["K"].astype(np.float).round(decimals=3)
rawData["Maturity"] = rawData["T"].round(decimals=3)
#Bootstrapping
bootstrap = bootstrapping.bootstrappingAveragedExcel(pathFolder + fileName,
S0,
rawData["Strike"],
asOfDate,
rawData["Call_price"],
rawData["Put_price"],
rawData["Maturity"])
callDf = pd.DataFrame(np.vstack([rawData["Call_price"].values, np.ones_like(rawData["Call_price"].values),
rawData["Strike"].values, rawData["Maturity"].values]).T,
index = rawData.index,
columns = ["Price", "OptionType", "Strike", "Maturity"])
putDf = pd.DataFrame(np.vstack([rawData["Put_price"].values, 2 * np.ones_like(rawData["Put_price"].values),
rawData["Strike"].values, rawData["Maturity"].values]).T,
index=rawData.index,
columns=["Price", "OptionType", "Strike", "Maturity"])
rawData = pd.concat([callDf, putDf])
impvol = BS.vectorizedImpliedVolatilityCalibration(S0,
bootstrap,
rawData["Maturity"],
rawData["Strike"],
rawData["OptionType"],
rawData["Price"])
rawData["ImpliedVol"] = impvol
rawData = removeDuplicateIndex(rawData.set_index(["Strike","Maturity"]))
trainingSet, testingSet = selectTrainingSet(rawData)
trainingDataSet = dataSetConstruction.generateData(trainingSet["ImpliedVol"],
S0,
bootstrap,
localVolatilityRef = None,
priceDf=trainingSet.reset_index(),
spotValue = False)
testingDataSet = dataSetConstruction.generateData(testingSet["ImpliedVol"],
S0,
bootstrap,
localVolatilityRef = None,
priceDf=testingSet.reset_index(),
spotValue = False)
return trainingDataSet, testingDataSet, bootstrap, S0
def loadGPLocVol(pathFolder, GPKernel, bootstrap, S0):
pathGP = pathFolder + ("local_vol_gaussian.csv" if GPKernel == "Gaussian" else "local_vol_matern_5_2.csv")
print("Loading local volatility from : ", pathGP)
locVolAresky = pd.read_csv(pathGP, decimal=".").apply(pd.to_numeric).dropna()
#locVolAresky["Strike"] = locVolAresky["K"].values
locVolAresky.insert(0, "Strike", locVolAresky["K"].values)
#locVolAresky["Maturity"] = locVolAresky["T"].round(decimals=3)
locVolAresky.insert(0, "Maturity", locVolAresky["T"].round(decimals=3))
renameDict = {"loc_vol": "LocalVolatility"}
locVolAreskyFormatted = locVolAresky.rename(columns=renameDict).set_index(["Strike", "Maturity"])
changedVarAresky = bootstrap.changeOfVariable(locVolAreskyFormatted["K"],
locVolAreskyFormatted["T"])
#locVolAreskyFormatted["Maturity"] = locVolAreskyFormatted["T"]
locVolAreskyFormatted.insert(0, "Maturity", locVolAreskyFormatted["T"].round(decimals=3))
#locVolAreskyFormatted["Strike"] = locVolAreskyFormatted["K"]
locVolAreskyFormatted.insert(0, "Strike", locVolAreskyFormatted["K"].round(decimals=3))
#locVolAreskyFormatted["ChangedStrike"] = pd.Series(changedVarAresky[0],
# index=locVolAreskyFormatted.index)
locVolAreskyFormatted.insert(0,
"ChangedStrike",
pd.Series(changedVarAresky[0], index=locVolAreskyFormatted.index))
#locVolAreskyFormatted["logMoneyness"] = np.log(locVolAreskyFormatted["ChangedStrike"] / S0)
locVolAreskyFormatted.insert(0, "logMoneyness",
np.log(locVolAreskyFormatted["ChangedStrike"] / S0))
locVolAreskyFormatted.insert(0, "OptionType",
-np.ones_like(locVolAreskyFormatted["ChangedStrike"]))
return locVolAreskyFormatted[~locVolAreskyFormatted.index.duplicated(keep='first')]
def loadGPLocVol(workingFolder, filename, bootstrap, S0):
#pathGP = pathFolder + ("local_vol_gaussian.csv" if GPKernel == "Gaussian" else "local_vol_matern_5_2.csv")
pathGP = workingFolder + filename
print("Loading local volatility from : ", pathGP)
locVolAresky = pd.read_excel(pathGP,
header=0,
sheet_name="Sheet1")
#locVolAresky["Strike"] = locVolAresky["K"].values
locVolAresky.insert(0, "Strike", locVolAresky["K"].round(decimals=3).values)
#locVolAresky["Maturity"] = locVolAresky["T"].round(decimals=3)
locVolAresky.insert(0, "Maturity", locVolAresky["T"].round(decimals=3))
renameDict = {"loc_vol": "LocalVolatility"}
locVolAreskyFormatted = locVolAresky.rename(columns=renameDict).set_index(["Strike", "Maturity"])
changedVarAresky = bootstrap.changeOfVariable(locVolAreskyFormatted["K"],
locVolAreskyFormatted["T"])
#locVolAreskyFormatted["Maturity"] = locVolAreskyFormatted["T"]
locVolAreskyFormatted.insert(0, "Maturity", locVolAreskyFormatted["T"].round(decimals=3))
#locVolAreskyFormatted["Strike"] = locVolAreskyFormatted["K"]
locVolAreskyFormatted.insert(0, "Strike", locVolAreskyFormatted["K"].round(decimals=3))
#locVolAreskyFormatted["ChangedStrike"] = pd.Series(changedVarAresky[0],
# index=locVolAreskyFormatted.index)
locVolAreskyFormatted.insert(0,
"ChangedStrike",
pd.Series(changedVarAresky[0], index=locVolAreskyFormatted.index))
#locVolAreskyFormatted["logMoneyness"] = np.log(locVolAreskyFormatted["ChangedStrike"] / S0)
locVolAreskyFormatted.insert(0, "logMoneyness",
np.log(locVolAreskyFormatted["ChangedStrike"] / S0))
locVolAreskyFormatted.insert(0, "OptionType",
-np.ones_like(locVolAreskyFormatted["ChangedStrike"]))
return locVolAreskyFormatted[~locVolAreskyFormatted.index.duplicated(keep='first')]
def removeDataViolatingStaticArbitrageStep(df):
arbitrableRows = []
for strike in df.rename({"Strike": "StrikeColumn"}, axis=1).groupby("StrikeColumn"):
impliedTotVariance = np.square(strike[1]["ImpliedVol"]) * strike[1]["Maturity"]
sortedStrike = pd.Series(impliedTotVariance.values,
index=strike[1]["Maturity"].values).sort_index().diff().dropna()
thetaViolation = sortedStrike.diff().dropna()
if (thetaViolation < 0).sum() > 0:
arbitrableMaturities = thetaViolation[(thetaViolation < 0)].index
arbitrableRows.append(strike[1][ strike[1]["Maturity"].isin(arbitrableMaturities) ])
#plt.plot(strike[1]["Maturity"].values, impliedTotVariance.values, label=str(strike[0]))
#print("Strike : ", strike[0], " NbViolations : ", (thetaViolation < 0).sum())
#print((thetaViolation < 0))
#print(sortedStrike)
return df.drop(pd.concat(arbitrableRows).index) if (len(arbitrableRows) > 0) else df
def removeDataViolatingStaticArbitrage(df):
formerDf = df
condition = True
maxNbLoop = 500
iterNb = 0
while condition and (iterNb < maxNbLoop) :
dfStep = removeDataViolatingStaticArbitrageStep(formerDf)
condition = (dfStep.shape[0] < formerDf.shape[0]) and (dfStep.shape[0] > 0)
formerDf = dfStep
iterNb = iterNb + 1
return dfStep
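# Illustrative sanity check (toy numbers are assumptions): the filter targets
# calendar-spread arbitrage, i.e. total implied variance w = sigma^2 * T should be
# non-decreasing in maturity at a fixed strike. Below, w = 0.01, 0.045, 0.0441, so the
# T = 1.0 quote violates the condition and is expected to be dropped.
def _demo_remove_static_arbitrage():
    quotes = pd.DataFrame({
        "Strike": [100.0, 100.0, 100.0],
        "Maturity": [0.25, 0.5, 1.0],
        "ImpliedVol": [0.20, 0.30, 0.21],
    })
    print(removeDataViolatingStaticArbitrage(quotes))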
def loadFormattedData(pathFolder):
S0 = 2859.53 #Hard coded value
trainingPath = pathFolder + "trainingDataSet.csv"
trainingDataset = pd.read_csv(trainingPath).set_index(["Strike", "Maturity"]).rename({"Strike.1": "Strike",
"Maturity.1": "Maturity"},
axis=1).sort_index()
testingPath = pathFolder + "testingDataSet.csv"
testingDataset = pd.read_csv(testingPath).set_index(["Strike", "Maturity"]).rename({"Strike.1": "Strike",
"Maturity.1": "Maturity"},
axis=1).sort_index()
bootstrappingPath = pathFolder + "dfCurve.csv"
dfCurve = pd.read_csv(bootstrappingPath)
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas.core.dtypes.generic import ABCIndexClass
import pandas as pd
import pandas._testing as tm
from pandas.api.types import is_float, is_float_dtype, is_integer, is_scalar
from pandas.core.arrays import IntegerArray, integer_array
from pandas.core.arrays.integer import (
Int8Dtype,
Int16Dtype,
Int32Dtype,
Int64Dtype,
UInt8Dtype,
UInt16Dtype,
UInt32Dtype,
UInt64Dtype,
)
from pandas.tests.extension.base import BaseOpsUtil
def make_data():
return list(range(8)) + [np.nan] + list(range(10, 98)) + [np.nan] + [99, 100]
@pytest.fixture(
params=[
Int8Dtype,
Int16Dtype,
Int32Dtype,
Int64Dtype,
UInt8Dtype,
UInt16Dtype,
UInt32Dtype,
UInt64Dtype,
]
)
def dtype(request):
return request.param()
@pytest.fixture
def data(dtype):
return integer_array(make_data(), dtype=dtype)
@pytest.fixture
def data_missing(dtype):
return integer_array([np.nan, 1], dtype=dtype)
@pytest.fixture(params=["data", "data_missing"])
def all_data(request, data, data_missing):
"""Parametrized fixture giving 'data' and 'data_missing'"""
if request.param == "data":
return data
elif request.param == "data_missing":
return data_missing
def test_dtypes(dtype):
# smoke tests on auto dtype construction
if dtype.is_signed_integer:
assert np.dtype(dtype.type).kind == "i"
else:
assert np.dtype(dtype.type).kind == "u"
assert dtype.name is not None
@pytest.mark.parametrize(
"dtype, expected",
[
(Int8Dtype(), "Int8Dtype()"),
(Int16Dtype(), "Int16Dtype()"),
(Int32Dtype(), "Int32Dtype()"),
(Int64Dtype(), "Int64Dtype()"),
(UInt8Dtype(), "UInt8Dtype()"),
(UInt16Dtype(), "UInt16Dtype()"),
(UInt32Dtype(), "UInt32Dtype()"),
(UInt64Dtype(), "UInt64Dtype()"),
],
)
def test_repr_dtype(dtype, expected):
assert repr(dtype) == expected
def test_repr_array():
result = repr(integer_array([1, None, 3]))
expected = "<IntegerArray>\n[1, <NA>, 3]\nLength: 3, dtype: Int64"
assert result == expected
def test_repr_array_long():
data = integer_array([1, 2, None] * 1000)
expected = (
"<IntegerArray>\n"
"[ 1, 2, <NA>, 1, 2, <NA>, 1, 2, <NA>, 1,\n"
" ...\n"
" <NA>, 1, 2, <NA>, 1, 2, <NA>, 1, 2, <NA>]\n"
"Length: 3000, dtype: Int64"
)
result = repr(data)
assert result == expected
class TestConstructors:
def test_uses_pandas_na(self):
a = pd.array([1, None], dtype=pd.Int64Dtype())
assert a[1] is pd.NA
def test_from_dtype_from_float(self, data):
# construct from our dtype & string dtype
dtype = data.dtype
# from float
expected = pd.Series(data)
result = pd.Series(
data.to_numpy(na_value=np.nan, dtype="float"), dtype=str(dtype)
)
tm.assert_series_equal(result, expected)
# from int / list
expected = pd.Series(data)
result = pd.Series(np.array(data).tolist(), dtype=str(dtype))
tm.assert_series_equal(result, expected)
# from int / array
expected = pd.Series(data).dropna().reset_index(drop=True)
dropped = np.array(data.dropna()).astype(np.dtype((dtype.type)))
result = pd.Series(dropped, dtype=str(dtype))
tm.assert_series_equal(result, expected)
class TestArithmeticOps(BaseOpsUtil):
def _check_divmod_op(self, s, op, other, exc=None):
super()._check_divmod_op(s, op, other, None)
def _check_op(self, s, op_name, other, exc=None):
op = self.get_op_from_name(op_name)
result = op(s, other)
# compute expected
mask = s.isna()
# if s is a DataFrame, squeeze to a Series
# for comparison
if isinstance(s, pd.DataFrame):
result = result.squeeze()
s = s.squeeze()
mask = mask.squeeze()
# other array is an Integer
if isinstance(other, IntegerArray):
omask = getattr(other, "mask", None)
mask = getattr(other, "data", other)
if omask is not None:
mask |= omask
# 1 ** na is na, so need to unmask those
if op_name == "__pow__":
mask = np.where(~s.isna() & (s == 1), False, mask)
elif op_name == "__rpow__":
other_is_one = other == 1
if isinstance(other_is_one, pd.Series):
other_is_one = other_is_one.fillna(False)
mask = np.where(other_is_one, False, mask)
# float result type or float op
if (
is_float_dtype(other)
or is_float(other)
or op_name in ["__rtruediv__", "__truediv__", "__rdiv__", "__div__"]
):
rs = s.astype("float")
expected = op(rs, other)
self._check_op_float(result, expected, mask, s, op_name, other)
# integer result type
else:
rs = pd.Series(s.values._data, name=s.name)
expected = op(rs, other)
self._check_op_integer(result, expected, mask, s, op_name, other)
def _check_op_float(self, result, expected, mask, s, op_name, other):
# check comparisons that are resulting in float dtypes
expected[mask] = np.nan
if "floordiv" in op_name:
# Series op sets 1//0 to np.inf, which IntegerArray does not do (yet)
mask2 = np.isinf(expected) & np.isnan(result)
expected[mask2] = np.nan
tm.assert_series_equal(result, expected)
def _check_op_integer(self, result, expected, mask, s, op_name, other):
# check comparisons that are resulting in integer dtypes
# to compare properly, we convert the expected
# to float, mask to nans and convert infs
# if we have uints then we process as uints
# then convert to float
# and we ultimately want to create a IntArray
# for comparisons
fill_value = 0
# mod/rmod turn floating 0 into NaN while
# integer works as expected (no nan)
if op_name in ["__mod__", "__rmod__"]:
if is_scalar(other):
if other == 0:
expected[s.values == 0] = 0
else:
expected = expected.fillna(0)
else:
expected[
(s.values == 0).fillna(False)
& ((expected == 0).fillna(False) | expected.isna())
] = 0
try:
expected[
((expected == np.inf) | (expected == -np.inf)).fillna(False)
] = fill_value
original = expected
expected = expected.astype(s.dtype)
except ValueError:
expected = expected.astype(float)
expected[
((expected == np.inf) | (expected == -np.inf)).fillna(False)
] = fill_value
original = expected
expected = expected.astype(s.dtype)
expected[mask] = pd.NA
# assert that the expected astype is ok
# (skip for unsigned as they have wrap around)
if not s.dtype.is_unsigned_integer:
original = pd.Series(original)
# we need to fill with 0's to emulate what an astype('int') does
# (truncation) for certain ops
if op_name in ["__rtruediv__", "__rdiv__"]:
mask |= original.isna()
original = original.fillna(0).astype("int")
original = original.astype("float")
original[mask] = np.nan
tm.assert_series_equal(original, expected.astype("float"))
# assert our expected result
tm.assert_series_equal(result, expected)
def test_arith_integer_array(self, data, all_arithmetic_operators):
# we operate with a rhs of an integer array
op = all_arithmetic_operators
s = pd.Series(data)
rhs = pd.Series([1] * len(data), dtype=data.dtype)
rhs.iloc[-1] = np.nan
self._check_op(s, op, rhs)
def test_arith_series_with_scalar(self, data, all_arithmetic_operators):
# scalar
op = all_arithmetic_operators
s = pd.Series(data)
self._check_op(s, op, 1, exc=TypeError)
def test_arith_frame_with_scalar(self, data, all_arithmetic_operators):
# frame & scalar
op = all_arithmetic_operators
df = pd.DataFrame({"A": data})
self._check_op(df, op, 1, exc=TypeError)
def test_arith_series_with_array(self, data, all_arithmetic_operators):
# ndarray & other series
op = all_arithmetic_operators
s = pd.Series(data)
other = np.ones(len(s), dtype=s.dtype.type)
self._check_op(s, op, other, exc=TypeError)
def test_arith_coerce_scalar(self, data, all_arithmetic_operators):
op = all_arithmetic_operators
s = pd.Series(data)
other = 0.01
self._check_op(s, op, other)
@pytest.mark.parametrize("other", [1.0, np.array(1.0)])
def test_arithmetic_conversion(self, all_arithmetic_operators, other):
# if we have a float operand we should have a float result
# if that is equal to an integer
op = self.get_op_from_name(all_arithmetic_operators)
s = pd.Series([1, 2, 3], dtype="Int64")
result = op(s, other)
assert result.dtype is np.dtype("float")
def test_arith_len_mismatch(self, all_arithmetic_operators):
# operating with a list-like with non-matching length raises
op = self.get_op_from_name(all_arithmetic_operators)
other = np.array([1.0])
s = pd.Series([1, 2, 3], dtype="Int64")
with pytest.raises(ValueError, match="Lengths must match"):
op(s, other)
@pytest.mark.parametrize("other", [0, 0.5])
def test_arith_zero_dim_ndarray(self, other):
arr = integer_array([1, None, 2])
result = arr + np.array(other)
expected = arr + other
tm.assert_equal(result, expected)
def test_error(self, data, all_arithmetic_operators):
# invalid ops
op = all_arithmetic_operators
s = pd.Series(data)
ops = getattr(s, op)
opa = getattr(data, op)
# invalid scalars
msg = (
r"(:?can only perform ops with numeric values)"
r"|(:?IntegerArray cannot perform the operation mod)"
)
with pytest.raises(TypeError, match=msg):
ops("foo")
with pytest.raises(TypeError, match=msg):
ops(pd.Timestamp("20180101"))
# invalid array-likes
with pytest.raises(TypeError, match=msg):
ops(pd.Series("foo", index=s.index))
if op != "__rpow__":
# TODO(extension)
# rpow with a datetimelike coerces the integer array incorrectly
msg = (
"can only perform ops with numeric values|"
"cannot perform .* with this index type: DatetimeArray|"
"Addition/subtraction of integers and integer-arrays "
"with DatetimeArray is no longer supported. *"
)
with pytest.raises(TypeError, match=msg):
ops(pd.Series(pd.date_range("20180101", periods=len(s))))
# 2d
result = opa(pd.DataFrame({"A": s}))
assert result is NotImplemented
msg = r"can only perform ops with 1-d structures"
with pytest.raises(NotImplementedError, match=msg):
opa(np.arange(len(s)).reshape(-1, len(s)))
@pytest.mark.parametrize("zero, negative", [(0, False), (0.0, False), (-0.0, True)])
def test_divide_by_zero(self, zero, negative):
# https://github.com/pandas-dev/pandas/issues/27398
a = pd.array([0, 1, -1, None], dtype="Int64")
result = a / zero
expected = np.array([np.nan, np.inf, -np.inf, np.nan])
if negative:
expected *= -1
tm.assert_numpy_array_equal(result, expected)
def test_pow_scalar(self):
a = pd.array([-1, 0, 1, None, 2], dtype="Int64")
result = a ** 0
expected = pd.array([1, 1, 1, 1, 1], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = a ** 1
expected = pd.array([-1, 0, 1, None, 2], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = a ** pd.NA
expected = pd.array([None, None, 1, None, None], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = a ** np.nan
expected = np.array([np.nan, np.nan, 1, np.nan, np.nan], dtype="float64")
tm.assert_numpy_array_equal(result, expected)
# reversed
a = a[1:] # Can't raise integers to negative powers.
result = 0 ** a
expected = pd.array([1, 0, None, 0], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = 1 ** a
expected = pd.array([1, 1, 1, 1], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = pd.NA ** a
expected = pd.array([1, None, None, None], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = np.nan ** a
expected = np.array([1, np.nan, np.nan, np.nan], dtype="float64")
tm.assert_numpy_array_equal(result, expected)
def test_pow_array(self):
a = integer_array([0, 0, 0, 1, 1, 1, None, None, None])
b = integer_array([0, 1, None, 0, 1, None, 0, 1, None])
result = a ** b
expected = integer_array([1, 0, None, 1, 1, 1, 1, None, None])
tm.assert_extension_array_equal(result, expected)
def test_rpow_one_to_na(self):
# https://github.com/pandas-dev/pandas/issues/22022
# https://github.com/pandas-dev/pandas/issues/29997
arr = integer_array([np.nan, np.nan])
result = np.array([1.0, 2.0]) ** arr
expected = np.array([1.0, np.nan])
tm.assert_numpy_array_equal(result, expected)
class TestComparisonOps(BaseOpsUtil):
def _compare_other(self, data, op_name, other):
op = self.get_op_from_name(op_name)
# array
result = pd.Series(op(data, other))
expected = pd.Series(op(data._data, other), dtype="boolean")
# fill the nan locations
expected[data._mask] = pd.NA
tm.assert_series_equal(result, expected)
# series
s = pd.Series(data)
result = op(s, other)
expected = op(pd.Series(data._data), other)
# fill the nan locations
expected[data._mask] = pd.NA
expected = expected.astype("boolean")
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("other", [True, False, pd.NA, -1, 0, 1])
def test_scalar(self, other, all_compare_operators):
op = self.get_op_from_name(all_compare_operators)
a = pd.array([1, 0, None], dtype="Int64")
result = op(a, other)
if other is pd.NA:
expected = pd.array([None, None, None], dtype="boolean")
else:
values = op(a._data, other)
expected = pd.arrays.BooleanArray(values, a._mask, copy=True)
tm.assert_extension_array_equal(result, expected)
# ensure we haven't mutated anything inplace
result[0] = pd.NA
tm.assert_extension_array_equal(a, pd.array([1, 0, None], dtype="Int64"))
def test_array(self, all_compare_operators):
op = self.get_op_from_name(all_compare_operators)
a = pd.array([0, 1, 2, None, None, None], dtype="Int64")
b = pd.array([0, 1, None, 0, 1, None], dtype="Int64")
result = op(a, b)
values = op(a._data, b._data)
mask = a._mask | b._mask
expected = pd.arrays.BooleanArray(values, mask)
tm.assert_extension_array_equal(result, expected)
# ensure we haven't mutated anything inplace
result[0] = pd.NA
tm.assert_extension_array_equal(
a, pd.array([0, 1, 2, None, None, None], dtype="Int64")
)
tm.assert_extension_array_equal(
b, pd.array([0, 1, None, 0, 1, None], dtype="Int64")
)
def test_compare_with_booleanarray(self, all_compare_operators):
op = self.get_op_from_name(all_compare_operators)
a = pd.array([True, False, None] * 3, dtype="boolean")
b = pd.array([0] * 3 + [1] * 3 + [None] * 3, dtype="Int64")
other = pd.array([False] * 3 + [True] * 3 + [None] * 3, dtype="boolean")
expected = op(a, other)
result = op(a, b)
tm.assert_extension_array_equal(result, expected)
def test_no_shared_mask(self, data):
result = data + 1
assert np.shares_memory(result._mask, data._mask) is False
def test_compare_to_string(self, any_nullable_int_dtype):
# GH 28930
s = pd.Series([1, None], dtype=any_nullable_int_dtype)
result = s == "a"
expected = pd.Series([False, pd.NA], dtype="boolean")
self.assert_series_equal(result, expected)
def test_compare_to_int(self, any_nullable_int_dtype, all_compare_operators):
# GH 28930
s1 = pd.Series([1, None, 3], dtype=any_nullable_int_dtype)
s2 = pd.Series([1, None, 3], dtype="float")
method = getattr(s1, all_compare_operators)
result = method(2)
method = getattr(s2, all_compare_operators)
expected = method(2).astype("boolean")
expected[s2.isna()] = pd.NA
self.assert_series_equal(result, expected)
class TestCasting:
@pytest.mark.parametrize("dropna", [True, False])
def test_construct_index(self, all_data, dropna):
# ensure that we do not coerce to Float64Index, rather
# keep as Index
all_data = all_data[:10]
if dropna:
other = np.array(all_data[~all_data.isna()])
else:
other = all_data
result = pd.Index(integer_array(other, dtype=all_data.dtype))
expected = pd.Index(other, dtype=object)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("dropna", [True, False])
def test_astype_index(self, all_data, dropna):
# as an int/uint index to Index
all_data = all_data[:10]
if dropna:
other = all_data[~all_data.isna()]
else:
other = all_data
dtype = all_data.dtype
idx = pd.Index(np.array(other))
assert isinstance(idx, ABCIndexClass)
result = idx.astype(dtype)
expected = idx.astype(object).astype(dtype)
tm.assert_index_equal(result, expected)
def test_astype(self, all_data):
all_data = all_data[:10]
ints = all_data[~all_data.isna()]
mixed = all_data
dtype = Int8Dtype()
# coerce to same type - ints
s = pd.Series(ints)
result = s.astype(all_data.dtype)
expected = pd.Series(ints)
tm.assert_series_equal(result, expected)
# coerce to same other - ints
s = pd.Series(ints)
result = s.astype(dtype)
expected = pd.Series(ints, dtype=dtype)
tm.assert_series_equal(result, expected)
# coerce to same numpy_dtype - ints
s = pd.Series(ints)
result = s.astype(all_data.dtype.numpy_dtype)
expected = pd.Series(ints._data.astype(all_data.dtype.numpy_dtype))
tm.assert_series_equal(result, expected)
# coerce to same type - mixed
s = pd.Series(mixed)
result = s.astype(all_data.dtype)
expected = pd.Series(mixed)
tm.assert_series_equal(result, expected)
# coerce to same other - mixed
s = pd.Series(mixed)
result = s.astype(dtype)
expected = pd.Series(mixed, dtype=dtype)
tm.assert_series_equal(result, expected)
# coerce to same numpy_dtype - mixed
s = pd.Series(mixed)
msg = r"cannot convert to .*-dtype NumPy array with missing values.*"
with pytest.raises(ValueError, match=msg):
s.astype(all_data.dtype.numpy_dtype)
# coerce to object
s = pd.Series(mixed)
result = s.astype("object")
expected = pd.Series(np.asarray(mixed))
tm.assert_series_equal(result, expected)
def test_astype_to_larger_numpy(self):
a = pd.array([1, 2], dtype="Int32")
result = a.astype("int64")
expected = np.array([1, 2], dtype="int64")
tm.assert_numpy_array_equal(result, expected)
a = pd.array([1, 2], dtype="UInt32")
result = a.astype("uint64")
expected = np.array([1, 2], dtype="uint64")
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", [Int8Dtype(), "Int8", UInt32Dtype(), "UInt32"])
def test_astype_specific_casting(self, dtype):
s = pd.Series([1, 2, 3], dtype="Int64")
result = s.astype(dtype)
expected = pd.Series([1, 2, 3], dtype=dtype)
tm.assert_series_equal(result, expected)
s = pd.Series([1, 2, 3, None], dtype="Int64")
result = s.astype(dtype)
expected = pd.Series([1, 2, 3, None], dtype=dtype)
tm.assert_series_equal(result, expected)
def test_construct_cast_invalid(self, dtype):
msg = "cannot safely"
arr = [1.2, 2.3, 3.7]
with pytest.raises(TypeError, match=msg):
integer_array(arr, dtype=dtype)
with pytest.raises(TypeError, match=msg):
pd.Series(arr).astype(dtype)
arr = [1.2, 2.3, 3.7, np.nan]
with pytest.raises(TypeError, match=msg):
integer_array(arr, dtype=dtype)
with pytest.raises(TypeError, match=msg):
pd.Series(arr).astype(dtype)
@pytest.mark.parametrize("in_series", [True, False])
def test_to_numpy_na_nan(self, in_series):
a = pd.array([0, 1, None], dtype="Int64")
if in_series:
a = pd.Series(a)
result = a.to_numpy(dtype="float64", na_value=np.nan)
expected = np.array([0.0, 1.0, np.nan], dtype="float64")
tm.assert_numpy_array_equal(result, expected)
result = a.to_numpy(dtype="int64", na_value=-1)
expected = np.array([0, 1, -1], dtype="int64")
tm.assert_numpy_array_equal(result, expected)
result = a.to_numpy(dtype="bool", na_value=False)
expected = np.array([False, True, False], dtype="bool")
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("in_series", [True, False])
@pytest.mark.parametrize("dtype", ["int32", "int64", "bool"])
def test_to_numpy_dtype(self, dtype, in_series):
a = pd.array([0, 1], dtype="Int64")
if in_series:
a = pd.Series(a)
result = a.to_numpy(dtype=dtype)
expected = np.array([0, 1], dtype=dtype)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", ["float64", "int64", "bool"])
def test_to_numpy_na_raises(self, dtype):
a = pd.array([0, 1, None], dtype="Int64")
with pytest.raises(ValueError, match=dtype):
a.to_numpy(dtype=dtype)
def test_astype_str(self):
a = pd.array([1, 2, None], dtype="Int64")
expected = np.array(["1", "2", "<NA>"], dtype=object)
tm.assert_numpy_array_equal(a.astype(str), expected)
tm.assert_numpy_array_equal(a.astype("str"), expected)
def test_astype_boolean(self):
# https://github.com/pandas-dev/pandas/issues/31102
a = pd.array([1, 0, -1, 2, None], dtype="Int64")
result = a.astype("boolean")
expected = pd.array([True, False, True, True, None], dtype="boolean")
tm.assert_extension_array_equal(result, expected)
def test_frame_repr(data_missing):
df = pd.DataFrame({"A": data_missing})
result = repr(df)
expected = " A\n0 <NA>\n1 1"
assert result == expected
def test_conversions(data_missing):
# astype to object series
df = pd.DataFrame({"A": data_missing})
result = df["A"].astype("object")
expected = pd.Series(np.array([np.nan, 1], dtype=object), name="A")
tm.assert_series_equal(result, expected)
# convert to object ndarray
# we assert that we are exactly equal
# including type conversions of scalars
result = df["A"].astype("object").values
expected = np.array([pd.NA, 1], dtype=object)
tm.assert_numpy_array_equal(result, expected)
for r, e in zip(result, expected):
if pd.isnull(r):
assert pd.isnull(e)
elif is_integer(r):
assert r == e
assert is_integer(e)
else:
assert r == e
assert type(r) == type(e)
def test_integer_array_constructor():
values = np.array([1, 2, 3, 4], dtype="int64")
mask = np.array([False, False, False, True], dtype="bool")
result = IntegerArray(values, mask)
expected = integer_array([1, 2, 3, np.nan], dtype="int64")
tm.assert_extension_array_equal(result, expected)
msg = r".* should be .* numpy array. Use the 'integer_array' function instead"
with pytest.raises(TypeError, match=msg):
IntegerArray(values.tolist(), mask)
with pytest.raises(TypeError, match=msg):
IntegerArray(values, mask.tolist())
with pytest.raises(TypeError, match=msg):
IntegerArray(values.astype(float), mask)
msg = r"__init__\(\) missing 1 required positional argument: 'mask'"
with pytest.raises(TypeError, match=msg):
IntegerArray(values)
@pytest.mark.parametrize(
"a, b",
[
([1, None], [1, np.nan]),
([None], [np.nan]),
([None, np.nan], [np.nan, np.nan]),
([np.nan, np.nan], [np.nan, np.nan]),
],
)
def test_integer_array_constructor_none_is_nan(a, b):
result = integer_array(a)
expected = integer_array(b)
tm.assert_extension_array_equal(result, expected)
def test_integer_array_constructor_copy():
values = np.array([1, 2, 3, 4], dtype="int64")
mask = np.array([False, False, False, True], dtype="bool")
result = IntegerArray(values, mask)
assert result._data is values
assert result._mask is mask
result = IntegerArray(values, mask, copy=True)
assert result._data is not values
assert result._mask is not mask
@pytest.mark.parametrize(
"values",
[
["foo", "bar"],
["1", "2"],
"foo",
1,
1.0,
pd.date_range("20130101", periods=2),
np.array(["foo"]),
[[1, 2], [3, 4]],
[np.nan, {"a": 1}],
],
)
def test_to_integer_array_error(values):
# error in converting existing arrays to IntegerArrays
msg = (
r"(:?.* cannot be converted to an IntegerDtype)"
r"|(:?values must be a 1D list-like)"
)
with pytest.raises(TypeError, match=msg):
integer_array(values)
def test_to_integer_array_inferred_dtype():
# if values has dtype -> respect it
result = integer_array(np.array([1, 2], dtype="int8"))
assert result.dtype == Int8Dtype()
result = integer_array(np.array([1, 2], dtype="int32"))
assert result.dtype == Int32Dtype()
# if values have no dtype -> always int64
result = integer_array([1, 2])
assert result.dtype == Int64Dtype()
def test_to_integer_array_dtype_keyword():
result = integer_array([1, 2], dtype="int8")
assert result.dtype == Int8Dtype()
# if values has dtype -> override it
result = integer_array(np.array([1, 2], dtype="int8"), dtype="int32")
assert result.dtype == Int32Dtype()
def test_to_integer_array_float():
result = integer_array([1.0, 2.0])
expected = integer_array([1, 2])
tm.assert_extension_array_equal(result, expected)
with pytest.raises(TypeError, match="cannot safely cast non-equivalent"):
integer_array([1.5, 2.0])
# for float dtypes, the itemsize is not preserved
result = integer_array(np.array([1.0, 2.0], dtype="float32"))
assert result.dtype == Int64Dtype()
@pytest.mark.parametrize(
"bool_values, int_values, target_dtype, expected_dtype",
[
([False, True], [0, 1], Int64Dtype(), Int64Dtype()),
([False, True], [0, 1], "Int64", Int64Dtype()),
([False, True, np.nan], [0, 1, np.nan], Int64Dtype(), Int64Dtype()),
],
)
def test_to_integer_array_bool(bool_values, int_values, target_dtype, expected_dtype):
result = integer_array(bool_values, dtype=target_dtype)
assert result.dtype == expected_dtype
expected = integer_array(int_values, dtype=target_dtype)
tm.assert_extension_array_equal(result, expected)
@pytest.mark.parametrize(
"values, to_dtype, result_dtype",
[
(np.array([1], dtype="int64"), None, Int64Dtype),
(np.array([1, np.nan]), None, Int64Dtype),
(np.array([1, np.nan]), "int8", Int8Dtype),
],
)
def test_to_integer_array(values, to_dtype, result_dtype):
# convert existing arrays to IntegerArrays
result = integer_array(values, dtype=to_dtype)
assert result.dtype == result_dtype()
expected = integer_array(values, dtype=result_dtype())
tm.assert_extension_array_equal(result, expected)
def test_cross_type_arithmetic():
df = pd.DataFrame(
{
"A": pd.Series([1, 2, np.nan], dtype="Int64"),
"B": pd.Series([1, np.nan, 3], dtype="UInt8"),
"C": [1, 2, 3],
}
)
result = df.A + df.C
expected = pd.Series([2, 4, np.nan], dtype="Int64")
tm.assert_series_equal(result, expected)
result = (df.A + df.C) * 3 == 12
expected = pd.Series([False, True, None], dtype="boolean")
tm.assert_series_equal(result, expected)
result = df.A + df.B
expected = pd.Series([2, np.nan, np.nan], dtype="Int64")
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("op", ["sum", "min", "max", "prod"])
def test_preserve_dtypes(op):
# TODO(#22346): preserve Int64 dtype
# for ops that enable (mean would actually work here
# but generally it is a float return value)
df = pd.DataFrame(
{
"A": ["a", "b", "b"],
"B": [1, None, 3],
"C": integer_array([1, None, 3], dtype="Int64"),
}
)
# op
result = getattr(df.C, op)()
assert isinstance(result, int)
# groupby
result = getattr(df.groupby("A"), op)()
expected = pd.DataFrame(
{"B": np.array([1.0, 3.0]), "C": integer_array([1, 3], dtype="Int64")},
index=pd.Index(["a", "b"], name="A"),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("op", ["mean"])
def test_reduce_to_float(op):
# some reduce ops always return float, even if the result
# is a rounded number
df = pd.DataFrame(
{
"A": ["a", "b", "b"],
"B": [1, None, 3],
"C": integer_array([1, None, 3], dtype="Int64"),
}
)
# op
result = getattr(df.C, op)()
assert isinstance(result, float)
# groupby
result = getattr(df.groupby("A"), op)()
expected = pd.DataFrame(
{"B": np.array([1.0, 3.0]), "C": integer_array([1, 3], dtype="Int64")},
index=pd.Index(["a", "b"], name="A"),
)
tm.assert_frame_equal(result, expected)
def test_astype_nansafe():
# see gh-22343
arr = integer_array([np.nan, 1, 2], dtype="Int8")
msg = "cannot convert to 'uint32'-dtype NumPy array with missing values."
with pytest.raises(ValueError, match=msg):
arr.astype("uint32")
@pytest.mark.parametrize("ufunc", [np.abs, np.sign])
# np.sign emits a warning with nans, <https://github.com/numpy/numpy/issues/15127>
@pytest.mark.filterwarnings("ignore:invalid value encountered in sign")
def test_ufuncs_single_int(ufunc):
a = integer_array([1, 2, -3, np.nan])
result = ufunc(a)
expected = integer_array(ufunc(a.astype(float)))
tm.assert_extension_array_equal(result, expected)
s = pd.Series(a)
result = ufunc(s)
expected = pd.Series(integer_array(ufunc(a.astype(float))))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("ufunc", [np.log, np.exp, np.sin, np.cos, np.sqrt])
def test_ufuncs_single_float(ufunc):
a = integer_array([1, 2, -3, np.nan])
with np.errstate(invalid="ignore"):
result = ufunc(a)
expected = ufunc(a.astype(float))
tm.assert_numpy_array_equal(result, expected)
s = pd.Series(a)
with np.errstate(invalid="ignore"):
result = ufunc(s)
expected = ufunc(s.astype(float))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("ufunc", [np.add, np.subtract])
def test_ufuncs_binary_int(ufunc):
# two IntegerArrays
a = integer_array([1, 2, -3, np.nan])
result = ufunc(a, a)
expected = integer_array(ufunc(a.astype(float), a.astype(float)))
tm.assert_extension_array_equal(result, expected)
# IntegerArray with numpy array
arr = np.array([1, 2, 3, 4])
result = ufunc(a, arr)
expected = integer_array(ufunc(a.astype(float), arr))
tm.assert_extension_array_equal(result, expected)
result = ufunc(arr, a)
expected = integer_array(ufunc(arr, a.astype(float)))
tm.assert_extension_array_equal(result, expected)
# IntegerArray with scalar
result = ufunc(a, 1)
expected = integer_array(ufunc(a.astype(float), 1))
tm.assert_extension_array_equal(result, expected)
result = ufunc(1, a)
expected = integer_array(ufunc(1, a.astype(float)))
tm.assert_extension_array_equal(result, expected)
@pytest.mark.parametrize("values", [[0, 1], [0, None]])
def test_ufunc_reduce_raises(values):
a = integer_array(values)
msg = r"The 'reduce' method is not supported."
with pytest.raises(NotImplementedError, match=msg):
np.add.reduce(a)
@td.skip_if_no("pyarrow", min_version="0.15.0")
def test_arrow_array(data):
# protocol added in 0.15.0
import pyarrow as pa
arr = pa.array(data)
expected = np.array(data, dtype=object)
expected[data.isna()] = None
expected = pa.array(expected, type=data.dtype.name.lower(), from_pandas=True)
assert arr.equals(expected)
@td.skip_if_no("pyarrow", min_version="0.16.0")
def test_arrow_roundtrip(data):
# roundtrip possible from arrow 0.16.0
import pyarrow as pa
df = pd.DataFrame({"a": data})
table = pa.table(df)
assert table.field("a").type == str(data.dtype.numpy_dtype)
result = table.to_pandas()
tm.assert_frame_equal(result, df)
@td.skip_if_no("pyarrow", min_version="0.16.0")
def test_arrow_from_arrow_uint():
# https://github.com/pandas-dev/pandas/issues/31896
# possible mismatch in types
import pyarrow as pa
dtype = pd.UInt32Dtype()
result = dtype.__from_arrow__(pa.array([1, 2, 3, 4, None], type="int64"))
expected = pd.array([1, 2, 3, 4, None], dtype="UInt32")
tm.assert_extension_array_equal(result, expected)
@pytest.mark.parametrize(
"pandasmethname, kwargs",
[
("var", {"ddof": 0}),
("var", {"ddof": 1}),
("kurtosis", {}),
("skew", {}),
("sem", {}),
],
)
def test_stat_method(pandasmethname, kwargs):
s = pd.Series(data=[1, 2, 3, 4, 5, 6, np.nan, np.nan], dtype="Int64")
pandasmeth = getattr(s, pandasmethname)
result = pandasmeth(**kwargs)
s2 = pd.Series(data=[1, 2, 3, 4, 5, 6], dtype="Int64")
pandasmeth = getattr(s2, pandasmethname)
expected = pandasmeth(**kwargs)
assert expected == result
def test_value_counts_na():
arr = pd.array([1, 2, 1, pd.NA], dtype="Int64")
result = arr.value_counts(dropna=False)
expected = pd.Series([2, 1, 1], index=[1, 2, pd.NA], dtype="Int64")
tm.assert_series_equal(result, expected)
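# --- Illustrative sketch (not part of the original test suite) ---
# The tests above exercise pandas' nullable integer arrays. A minimal example of
# the masked-NA semantics they rely on (assumes pandas >= 1.0, where pd.NA and
# the "Int64" extension dtype are available):
def _example_nullable_int_semantics():
    a = pd.array([1, 2, None], dtype="Int64")
    added = a + 1        # IntegerArray: [2, 3, <NA>] -- NA propagates, no float upcast
    compared = a > 1     # BooleanArray: [False, True, <NA>]
    as_float = a.to_numpy(dtype="float64", na_value=np.nan)  # [1.0, 2.0, nan]
    return added, compared, as_float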
import warnings
warnings.filterwarnings('ignore',category=FutureWarning)
warnings.filterwarnings('ignore',category=DeprecationWarning)
from configparser import ConfigParser, MissingSectionHeaderError, NoSectionError,NoOptionError
import os, glob
import pandas as pd
from dtreeviz.trees import *
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import train_test_split
from yellowbrick.classifier import ClassificationReport
from xgboost.sklearn import XGBClassifier
from imblearn.combine import SMOTEENN
from imblearn.over_sampling import SMOTE
from sklearn.tree import export_graphviz
from subprocess import call
import shap
import pickle
import csv
import warnings
import numpy as np
import eli5
from eli5.sklearn import PermutationImportance
import matplotlib.pyplot as plt
from sklearn.model_selection import learning_curve
from sklearn.model_selection import ShuffleSplit
from sklearn.metrics import precision_recall_curve
import graphviz
import graphviz.backend
from dtreeviz.shadow import *
from sklearn import tree
from simba.drop_bp_cords import drop_bp_cords, GenerateMetaDataFileHeaders
from sklearn.metrics import make_scorer, accuracy_score, precision_score, recall_score, f1_score
from sklearn.model_selection import cross_val_score, cross_validate
from simba.rw_dfs import *
from sklearn.feature_selection import RFECV
from simba.shap_calcs import shap_summary_calculations
import time
# import timeit
def trainmodel2(inifile):
configFile = str(inifile)
config = ConfigParser()
try:
config.read(configFile)
except MissingSectionHeaderError:
print('ERROR: Not a valid project_config file. Please check the project_config.ini path.')
modelDir = config.get('SML settings', 'model_dir')
modelDir_out = os.path.join(modelDir, 'generated_models')
projectPath = config.get('General settings', 'project_path')
csv_dir_in, csv_dir_out = os.path.join(projectPath, 'csv', 'targets_inserted'), os.path.join(projectPath,'csv', 'machine_results')
try:
wfileType = config.get('General settings', 'workflow_file_type')
except NoOptionError:
wfileType = 'csv'
if not os.path.exists(modelDir_out):
os.makedirs(modelDir_out)
tree_evaluations_out = os.path.join(modelDir_out, 'model_evaluations')
if not os.path.exists(tree_evaluations_out):
os.makedirs(tree_evaluations_out)
try:
model_nos = config.getint('SML settings', 'No_targets')
data_folder = config.get('create ensemble settings', 'data_folder')
model_to_run = config.get('create ensemble settings', 'model_to_run')
classifierName = config.get('create ensemble settings', 'classifier')
train_test_size = config.getfloat('create ensemble settings', 'train_test_size')
except ValueError:
print('ERROR: Project_config.ini contains errors in the [create ensemble settings] or [SML settings] sections. Please check the project_config.ini file.')
features = pd.DataFrame()
def generateClassificationReport(clf, class_names):
try:
visualizer = ClassificationReport(clf, classes=class_names, support=True)
visualizer.score(data_test, target_test)
visualizerPath = os.path.join(tree_evaluations_out, str(classifierName) + '_classificationReport.png')
g = visualizer.poof(outpath=visualizerPath, clear_figure=True)
except KeyError:
print(('Warning, not enough data for classification report: ') + str(classifierName))
def generateFeatureImportanceLog(importances):
feature_importances = [(feature, round(importance, 2)) for feature, importance in
zip(feature_list, importances)]
feature_importances = sorted(feature_importances, key=lambda x: x[1], reverse=True)
feature_importance_list = [('Variable: {:20} Importance: {}'.format(*pair)) for pair in feature_importances]
feature_importance_list_varNm = [i.split(':' " ", 3)[1] for i in feature_importance_list]
feature_importance_list_importance = [i.split(':' " ", 3)[2] for i in feature_importance_list]
log_df = pd.DataFrame()
log_df['Feature_name'] = feature_importance_list_varNm
log_df['Feature_importance'] = feature_importance_list_importance
savePath = os.path.join(tree_evaluations_out, str(classifierName) + '_feature_importance_log.csv')
log_df.to_csv(savePath)
return log_df
def generateShapLog(trainingSet, target_train, feature_list, classifierName, shap_target_present_no,shap_target_absent_no, inifile):
print('Calculating SHAP scores for ' + str(len(trainingSet)) +' observations...')
trainingSet[classifierName] = target_train
targetTrainSet = trainingSet.loc[trainingSet[classifierName] == 1]
nonTargetTrain = trainingSet.loc[trainingSet[classifierName] == 0]
try:
targetsForShap = targetTrainSet.sample(shap_target_present_no, replace=False)
except ValueError:
print('Could not find ' + str(shap_target_present_no) + ' in dataset. Taking the maximum instead (' + str(len(targetTrainSet)) + ')')
targetsForShap = targetTrainSet.sample(len(targetTrainSet), replace=False)
nontargetsForShap = nonTargetTrain.sample(shap_target_absent_no, replace=False)
shapTrainingSet = pd.concat([targetsForShap, nontargetsForShap])
targetValFrame = shapTrainingSet.pop(classifierName).values
explainer = shap.TreeExplainer(clf, data=None, model_output='raw', feature_perturbation='tree_path_dependent')
expected_value = explainer.expected_value[1]
outputDfRaw = pd.DataFrame(columns=feature_list)
shapHeaders = feature_list.copy()
shapHeaders.extend(('Expected_value', 'Sum', 'Prediction_probability', str(classifierName)))
outputDfShap = pd.DataFrame(columns=shapHeaders)
counter = 0
for frame in range(len(shapTrainingSet)):
currInstance = shapTrainingSet.iloc[[frame]]
shap_values = explainer.shap_values(currInstance, check_additivity=False)
shapList = list(shap_values[0][0])
shapList = [x * -1 for x in shapList]
shapList.append(expected_value)
shapList.append(sum(shapList))
probability = clf.predict_proba(currInstance)
shapList.append(probability[0][1])
shapList.append(targetValFrame[frame])
currRaw = list(shapTrainingSet.iloc[frame])
outputDfRaw.loc[len(outputDfRaw)] = currRaw
outputDfShap.loc[len(outputDfShap)] = shapList
counter += 1
print('SHAP calculated for: ' + str(counter) + '/' + str(len(shapTrainingSet)) + ' frames')
outputDfShap.to_csv(os.path.join(tree_evaluations_out, 'SHAP_values_' + str(classifierName) + '.csv'))
print('Creating SHAP summary statistics...')
shap_summary_calculations(inifile, outputDfShap, classifierName, expected_value, tree_evaluations_out)
outputDfRaw.to_csv(os.path.join(tree_evaluations_out, 'RAW_SHAP_feature_values_' + str(classifierName) + '.csv'))
print('All SHAP data saved in project_folder/models/evaluations directory')
def perf_RFCVE(projectPath, RFCVE_CVs, RFCVE_step_size, clf, data_train, target_train, feature_list):
selector = RFECV(estimator=clf, step=RFCVE_step_size, cv=RFCVE_CVs, scoring='f1', verbose=1)
selector = selector.fit(data_train, target_train)
selectorSupport = selector.support_.tolist()
trueIndex = np.where(selectorSupport)
trueIndex = list(trueIndex[0])
selectedFeatures = [feature_list[i] for i in trueIndex]
selectedFeaturesDf = pd.DataFrame(selectedFeatures, columns=['Selected_features'])
savePath = os.path.join(tree_evaluations_out, 'RFECV_selected_features_' + str(classifierName) + '.csv')
selectedFeaturesDf.to_csv(savePath)
print('Recursive feature elimination results stored in ' + str(savePath))
def generateFeatureImportanceBarGraph(log_df, N_feature_importance_bars):
log_df['Feature_importance'] = log_df['Feature_importance'].apply(pd.to_numeric)
log_df['Feature_name'] = log_df['Feature_name'].map(lambda x: x.lstrip('+-').rstrip('Importance'))
log_df = log_df.head(N_feature_importance_bars)
ax = log_df.plot.bar(x='Feature_name', y='Feature_importance', legend=False, rot=90, fontsize=6)
figName = str(classifierName) + '_feature_bars.png'
figSavePath = os.path.join(tree_evaluations_out, figName)
plt.ylabel('Feature_importance (mean decrease impurity)')
plt.tight_layout()
plt.savefig(figSavePath, dpi=600)
plt.close('all')
def generateExampleDecisionTree(estimator):
dot_name = os.path.join(tree_evaluations_out, str(classifierName) + '_tree.dot')
file_name = os.path.join(tree_evaluations_out, str(classifierName) + '_tree.pdf')
export_graphviz(estimator, out_file=dot_name, filled=True, rounded=True, special_characters=False,
impurity=False,
class_names=class_names, feature_names=data_train.columns)
commandStr = ('dot ' + str(dot_name) + ' -T pdf -o ' + str(file_name) + ' -Gdpi=600')
call(commandStr, shell=True)
def generateMetaData(metaDataList):
metaDataFn = str(classifierName) + '_meta.csv'
metaDataPath = os.path.join(modelDir_out, metaDataFn)
metaDataHeaders = GenerateMetaDataFileHeaders()
with open(metaDataPath, 'w', newline='') as f:
out_writer = csv.writer(f)
out_writer.writerow(metaDataHeaders)
out_writer.writerow(metaDataList)
def computePermutationImportance(data_test, target_test, clf):
perm = PermutationImportance(clf, random_state=1).fit(data_test, target_test)
permString = (eli5.format_as_text(eli5.explain_weights(perm, feature_names=data_test.columns.tolist())))
permString = permString.split('\n', 9)[-1]
all_rows = permString.split("\n")
all_cols = [row.split(' ') for row in all_rows]
all_cols.pop(0)
fimp = [row[0] for row in all_cols]
errot = [row[2] for row in all_cols]
name = [row[4] for row in all_cols]
dfvals = pd.DataFrame(list(zip(fimp, errot, name)), columns=['A', 'B', 'C'])
fname = os.path.join(tree_evaluations_out, str(classifierName) + '_permutations_importances.csv')
dfvals.to_csv(fname, index=False)
def LearningCurve(features, targetFrame, shuffle_splits, dataset_splits):
newDataTargets = np.concatenate((target_train, target_test), axis=0)
newDataFeatures = np.concatenate((data_train, data_test), axis=0)
cv = ShuffleSplit(n_splits=shuffle_splits, test_size=train_test_size, random_state=0)
model = RandomForestClassifier(n_estimators=RF_n_estimators, max_features=RF_max_features, n_jobs=-1, criterion=RF_criterion, min_samples_leaf=RF_min_sample_leaf, bootstrap=True, verbose=0)
train_sizes, train_scores, test_scores = learning_curve(model, newDataFeatures, newDataTargets, cv=cv, scoring='f1', shuffle=True, n_jobs=-1, verbose=1, train_sizes=np.linspace(0.01, 1.0, dataset_splits))
train_sizes = np.linspace(0.01, 1.0, dataset_splits)
train_mean = np.mean(train_scores, axis=1)
test_mean = np.mean(test_scores, axis=1)
train_std = np.std(train_scores, axis=1)
test_std = np.std(test_scores, axis=1)
learningCurve_df = pd.DataFrame()
learningCurve_df['Fraction Train Size'] = train_sizes
learningCurve_df['Train_mean_f1'] = train_mean
learningCurve_df['Test_mean_f1'] = test_mean
learningCurve_df['Train_std_f1'] = train_std
learningCurve_df['Test_std_f1'] = test_std
fname = os.path.join(tree_evaluations_out, str(classifierName) + '_learning_curve.csv')
learningCurve_df.to_csv(fname, index=False)
def dviz_classification_visualization(data_train, target_train, classifierName):
clf = tree.DecisionTreeClassifier(max_depth=5, random_state=666)
clf.fit(data_train, target_train)
svg_tree = dtreeviz(clf, data_train, target_train, target_name=classifierName, feature_names=data_train.columns, orientation="TD", class_names=[classifierName, 'not_' + classifierName], fancy=True, histtype='strip', X=None, label_fontsize=12, ticks_fontsize=8, fontname="Arial")
fname = os.path.join(tree_evaluations_out, str(classifierName) + 'fancy_decision_tree_example.svg')
svg_tree.save(fname)
# READ IN DATA FOLDER AND REMOVE ALL NON-FEATURE VARIABLES (POP DLC COORDINATE DATA AND TARGET DATA)
print('Reading in ' + str(len(glob.glob(data_folder + '/*.' + wfileType))) + ' annotated files...')
filesFound = glob.glob(csv_dir_in + '/*.' + wfileType)
for file in filesFound:
df = read_df(file, wfileType)
df = df.dropna(axis=0, how='all')
df = df.dropna() # jj inserted this, delete if needed
features = features.append(df, ignore_index=True)
try:
features = features.set_index('scorer')
except KeyError:
pass
features = features.loc[:, ~features.columns.str.contains('^Unnamed')]
totalTargetframes = features[classifierName].sum()
try:
targetFrame = features.pop(classifierName).values
except KeyError:
print('Error: the dataframe does not contain any target annotations. Please check the csv files in the project_folder/csv/target_inserted folder')
features = features.fillna(0)
try:
features = drop_bp_cords(features, inifile)
except KeyError:
print('Could not drop bodypart coordinates, bodypart coordinates missing in dataframe')
target_names = []
loop = 1
for i in range(model_nos):
currentModelNames = 'target_name_' + str(loop)
currentModelNames = config.get('SML settings', currentModelNames)
if currentModelNames != classifierName:
target_names.append(currentModelNames)
loop += 1
print('# of models to be created: 1')
for i in range(len(target_names)):
currentModelName = target_names[i]
features.pop(currentModelName).values
class_names = ['Not_' + classifierName, classifierName]
feature_list = list(features)
print('# of features in dataset: ' + str(len(feature_list)))
# IF SET BY USER - PERFORM UNDERSAMPLING AND OVERSAMPLING IF SET BY USER
data_train, data_test, target_train, target_test = train_test_split(features, targetFrame, test_size=train_test_size)
under_sample_setting = config.get('create ensemble settings', 'under_sample_setting')
over_sample_setting = config.get('create ensemble settings', 'over_sample_setting')
trainDf = data_train
trainDf[classifierName] = target_train
targetFrameRows = trainDf.loc[trainDf[classifierName] == 1]
print('# of ' + str(classifierName) + ' frames in dataset: ' + str(totalTargetframes))
if under_sample_setting == 'Random undersample':
try:
print('Performing undersampling...')
under_sample_ratio = config.getfloat('create ensemble settings', 'under_sample_ratio')
nonTargetFrameRows = trainDf.loc[trainDf[classifierName] == 0]
nontargetFrameRowsSize = int(len(targetFrameRows) * under_sample_ratio)
nonTargetFrameRows = nonTargetFrameRows.sample(nontargetFrameRowsSize, replace=False)
trainDf = pd.concat([targetFrameRows, nonTargetFrameRows])
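# --- Illustrative sketch (not part of the original SimBA code) ---
# The branch above implements random undersampling: keep every annotated target
# frame and sample the non-target frames down to len(targets) * under_sample_ratio.
# The same idea in isolation (the 'label_col' argument is a made-up placeholder):
def _example_random_undersample(train_df, label_col, ratio):
    positives = train_df[train_df[label_col] == 1]
    negatives = train_df[train_df[label_col] == 0]
    n_keep = int(len(positives) * ratio)
    negatives_sampled = negatives.sample(n_keep, replace=False)
    return pd.concat([positives, negatives_sampled])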
##############################################################################
#
# An example of positioning dataframes in a worksheet using Pandas and
# XlsxWriter.
#
# Copyright 2013-2017, <NAME>, <EMAIL>
#
from builtins import str
import pandas as pd
from openpyxl import load_workbook
import numpy as np
# Create some Pandas dataframes from some data.
df1 = pd.DataFrame({'Data': [11, 12, 13, 14,18]})
df2 = pd.DataFrame({'Data': [21, 22, 23, 24]})
df3 = pd.DataFrame({'Data': [31, 32, 33, 34]})
df4 = pd.DataFrame({'Data': [41, 42, 43, 44]})
# Create a Pandas Excel writer using XlsxWriter as the engine.
writer = pd.ExcelWriter('pandas_positioning.xlsx', engine='xlsxwriter')
# Position the dataframes in the worksheet.
df1.to_excel(writer, sheet_name='Sheet1') # Default position, cell A1.
df2.to_excel(writer, sheet_name='Sheet1', startcol=3)
df3.to_excel(writer, sheet_name='Sheet1', startrow=6)
# It is also possible to write the dataframe without the header, keeping the index under a custom label.
df4.to_excel(writer, sheet_name='Sheet1',
startrow=7, startcol=4, index_label='test', header=False, index=True)
# Close the Pandas Excel writer and output the Excel file.
worksheet = writer.sheets['Sheet1']
#number_of_rows = worksheet.nrows
worksheet.set_column(1,1000, 30)
writer.save()
# col: measure row: file
book = load_workbook('pandas_positioning.xlsx')
writer = pd.ExcelWriter('pandas_positioning.xlsx',engine='openpyxl')
writer.book = book
writer.sheets = dict((ws.title, ws) for ws in book.worksheets)
labels =['a','b','c','d','e']
measures=['iou','precision','recall','dice']
names = ['file1', 'file2', 'file3']
#data = {'measures':[measure*len(label) for measure in measures]}
res = np.random.randn(3,5)
data = {'iou':res,'precision':res,'recall':res,'dice':res}
row_index = np.asarray(names)
column_index = [measure+'_' + str(label) for measure in measures for label in labels ]
formated_data = {measure+'_' + str(label): data[measure][:,j] for measure in measures for j, label in enumerate(labels) }
df = pd.DataFrame.from_dict(formated_data)
df.index = pd.Index(row_index)
df = df[column_index]
df.to_excel(writer, sheet_name ='sheet2')
writer.save()
df.to_excel(writer, sheet_name ='sheet3')
writer.save()
# col: file row: measure
book = load_workbook('pandas_positioning.xlsx')
writer = pd.ExcelWriter('pandas_positioning.xlsx',engine='openpyxl')
writer.book = book
writer.sheets = dict((ws.title, ws) for ws in book.worksheets)
labels =['a','b','c','d','e']
measures=['iou','precision','recall','dice']
names = ['file1', 'file2', 'file3']
#data = {'measures':[measure*len(label) for measure in measures]}
data = {'iou':res,'precision':res,'recall':res,'dice':res}
column_index = np.asarray(names)
row_index = [measure+'_' + str(label) for measure in measures for label in labels ]
formated_data = {name: np.concatenate([data[measure][id] for measure in measures]).tolist() for id, name in enumerate(names) }
df = pd.DataFrame.from_dict(formated_data)
"""This module contains PlainFrame and PlainColumn tests.
"""
import collections
import datetime
import pytest
import numpy as np
import pandas as pd
from numpy.testing import assert_equal as np_assert_equal
from pywrangler.util.testing.plainframe import (
NULL,
ConverterFromPandas,
NaN,
PlainColumn,
PlainFrame
)
@pytest.fixture
def plainframe_standard():
cols = ["int", "float", "bool", "str", "datetime"]
data = [[1, 1.1, True, "string", "2019-01-01 10:00:00"],
[2, 2, False, "string2", "2019-02-01 10:00:00"]]
return PlainFrame.from_plain(data=data, dtypes=cols, columns=cols)
@pytest.fixture
def plainframe_missings():
cols = ["int", "float", "bool", "str", "datetime"]
data = [[1, 1.1, True, "string", "2019-01-01 10:00:00"],
[2, NaN, False, "string2", "2019-02-01 10:00:00"],
[NULL, NULL, NULL, NULL, NULL]]
return PlainFrame.from_plain(data=data, dtypes=cols, columns=cols)
@pytest.fixture
def df_from_pandas():
df = pd.DataFrame(
{"int": [1, 2],
"int_na": [1, np.NaN],
"bool": [True, False],
"bool_na": [True, np.NaN],
"float": [1.2, 1.3],
"float_na": [1.2, np.NaN],
"str": ["foo", "bar"],
"str_na": ["foo", np.NaN],
"datetime": [pd.Timestamp("2019-01-01"), pd.Timestamp("2019-01-02")],
"datetime_na": [pd.Timestamp("2019-01-01"), pd.NaT]})
return df
@pytest.fixture
def df_from_spark(spark):
from pyspark.sql import types
values = collections.OrderedDict(
{"int": [1, 2, None],
"smallint": [1, 2, None],
"bigint": [1, 2, None],
"bool": [True, False, None],
"single": [1.0, NaN, None],
"double": [1.0, NaN, None],
"str": ["foo", "bar", None],
"datetime": [datetime.datetime(2019, 1, 1),
datetime.datetime(2019, 1, 2),
None],
"date": [datetime.date(2019, 1, 1),
datetime.date(2019, 1, 2),
None],
"map": [{"foo": "bar"}, {"bar": "foo"}, None],
"array": [[1, 2, 3], [3, 4, 5], None]}
)
data = list(zip(*values.values()))
c = types.StructField
columns = [c("int", types.IntegerType()),
c("smallint", types.ShortType()),
c("bigint", types.LongType()),
c("bool", types.BooleanType()),
c("single", types.FloatType()),
c("double", types.DoubleType()),
c("str", types.StringType()),
c("datetime", types.TimestampType()),
c("date", types.DateType()),
c("map", types.MapType(types.StringType(), types.StringType())),
c("array", types.ArrayType(types.IntegerType()))]
schema = types.StructType(columns)
return spark.createDataFrame(data, schema=schema)
def create_plain_frame(cols, rows, reverse_cols=False, reverse_rows=False):
"""Helper function to automatically create instances of PlainFrame.
`cols` contains typed column annotations like "col1:int".
"""
if reverse_cols:
cols = cols[::-1]
columns, dtypes = zip(*[col.split(":") for col in cols])
values = list(range(1, rows + 1))
mapping = {"str": list(map(str, values)),
"int": values,
"float": list(map(float, values)),
"bool": list([x % 2 == 0 for x in values]),
"datetime": ["2019-01-{:02} 10:00:00".format(x) for x in
values]}
data = [mapping[dtype] for dtype in dtypes]
data = list(zip(*data))
if reverse_rows:
data = data[::-1]
return PlainFrame.from_plain(data=data,
dtypes=dtypes,
columns=columns)
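# --- Illustrative sketch (not part of the original test module) ---
# Example of what the helper above produces for two typed columns:
def _example_create_plain_frame():
    df = create_plain_frame(["col1:int", "col2:str"], 2)
    # equivalent to:
    # PlainFrame.from_plain(data=[(1, "1"), (2, "2")],
    #                       dtypes=("int", "str"),
    #                       columns=("col1", "col2"))
    return df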
def create_plainframe_single(values, dtype):
"""Create some special scenarios more easily. Always assumes a single
column with identical name. Only values and dtype varies.
"""
data = [[x] for x in values]
dtypes = [dtype]
columns = ["name"]
return PlainFrame.from_plain(data=data, dtypes=dtypes, columns=columns)
def test_plainframe():
# incorrect instantiation with non tuples with non factory method
plain_column = PlainColumn.from_plain(name="int",
dtype="int",
values=[1, 2, 3])
# correct instantiation
PlainFrame(plaincolumns=(plain_column,))
with pytest.raises(ValueError):
PlainFrame(plaincolumns=[plain_column])
with pytest.raises(ValueError):
PlainFrame(plaincolumns=[1])
def test_plainframe_from_plain_pandas_empty():
# tests GH#29
df = PlainFrame.from_plain(data=[], columns=["col1:int", "col2:str"])
col_values = lambda x: df.get_column(x).values
assert df.n_rows == 0
assert df.columns == ["col1", "col2"]
assert df.dtypes == ["int", "str"]
assert col_values("col1") == tuple()
assert col_values("col2") == tuple()
dfp = pd.DataFrame(columns=["col1", "col2"], dtype=int)
df = PlainFrame.from_pandas(dfp)
col_values = lambda x: df.get_column(x).values
assert df.n_rows == 0
assert df.columns == ["col1", "col2"]
assert df.dtypes == ["int", "int"]
assert col_values("col1") == tuple()
assert col_values("col2") == tuple()
def test_plainframe_attributes(plainframe_missings):
df = plainframe_missings
col_values = lambda x: df.get_column(x).values
assert df.columns == ["int", "float", "bool", "str", "datetime"]
assert df.dtypes == ["int", "float", "bool", "str", "datetime"]
assert col_values("int") == (1, 2, NULL)
assert col_values("str") == ("string", "string2", NULL)
assert col_values("datetime")[0] == datetime.datetime(2019, 1, 1, 10)
def test_plainframe_modify():
# change single value
df_origin = create_plainframe_single([1, 2], "int")
df_target = create_plainframe_single([1, 1], "int")
assert df_origin.modify({"name": {1: 1}}) == df_target
# change multiple values
df_origin = create_plainframe_single([1, 2], "int")
df_target = create_plainframe_single([3, 3], "int")
assert df_origin.modify({"name": {0: 3, 1: 3}}) == df_target
# change multiple columns
df_origin = PlainFrame.from_plain(data=[[1, 2], ["a", "b"]],
dtypes=["int", "str"],
columns=["int", "str"],
row_wise=False)
df_target = PlainFrame.from_plain(data=[[1, 1], ["a", "a"]],
dtypes=["int", "str"],
columns=["int", "str"],
row_wise=False)
assert df_origin.modify({"int": {1: 1}, "str": {1: "a"}}) == df_target
def test_plainframe_modify_assertions():
# check incorrect type conversion
df = create_plainframe_single([1, 2], "int")
with pytest.raises(TypeError):
df.modify({"name": {0: "asd"}})
def test_plainframe_getitem_subset():
df = create_plain_frame(["col1:str", "col2:int", "col3:int"], 2)
df_sub = create_plain_frame(["col1:str", "col2:int"], 2)
cmp_kwargs = dict(assert_column_order=True,
assert_row_order=True)
# test list of strings, slice and string
df["col1", "col2"].assert_equal(df_sub, **cmp_kwargs)
df["col1":"col2"].assert_equal(df_sub, **cmp_kwargs)
df["col1"].assert_equal(df_sub["col1"], **cmp_kwargs)
# test incorrect type
with pytest.raises(ValueError):
df[{"col1"}]
# test invalid column name
with pytest.raises(ValueError):
df["non_existant"]
def test_plainframe_get_column():
df = create_plain_frame(["col1:str", "col2:int"], 2)
assert df.get_column("col1") is df.plaincolumns[0]
# check value error for non existent column
with pytest.raises(ValueError):
df.get_column("does_not_exist")
def test_plainframe_parse_typed_columns():
parse = PlainFrame._parse_typed_columns
# invalid splits
cols = ["col1:int", "col2"]
with pytest.raises(ValueError):
parse(cols)
# invalid types
cols = ["col1:asd"]
with pytest.raises(ValueError):
parse(cols)
# invalid abbreviations
cols = ["col1:a"]
with pytest.raises(ValueError):
parse(cols)
# correct types and columns
cols = ["col1:str", "col2:s",
"col3:int", "col4:i",
"col5:float", "col6:f",
"col7:bool", "col8:b",
"col9:datetime", "col10:d"]
names = ["col{}".format(x) for x in range(1, 11)]
dtypes = ["str", "str",
"int", "int",
"float", "float",
"bool", "bool",
"datetime", "datetime"]
result = (names, dtypes)
np_assert_equal(parse(cols), result)
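# --- Illustrative sketch (not part of the original test module) ---
# As exercised above, the typed-column syntax is "<name>:<dtype>", with optional
# one-letter abbreviations (s=str, i=int, f=float, b=bool, d=datetime):
def _example_parse_typed_columns():
    names, dtypes = PlainFrame._parse_typed_columns(["id:i", "ts:d"])
    # per the assertions above: names -> ["id", "ts"], dtypes -> ["int", "datetime"]
    return names, dtypes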
def test_plainframe_from_plain():
# unequal elements per row
with pytest.raises(ValueError):
PlainFrame.from_plain(data=[[1, 2],
[1]],
columns=["a", "b"],
dtypes=["int", "int"])
# mismatch between number of columns and entries per row
with pytest.raises(ValueError):
PlainFrame.from_plain(data=[[1, 2],
[1, 2]],
columns=["a"],
dtypes=["int", "int"])
# mismatch between number of dtypes and entries per row
with pytest.raises(ValueError):
PlainFrame.from_plain(data=[[1, 2],
[1, 2]],
columns=["a", "b"],
dtypes=["int"])
# incorrect dtypes
with pytest.raises(ValueError):
PlainFrame.from_plain(data=[[1, 2],
[1, 2]],
columns=["a", "b"],
dtypes=["int", "bad_type"])
# type errors conversion
with pytest.raises(TypeError):
PlainFrame.from_plain(data=[[1, 2],
[1, 2]],
columns=["a", "b"],
dtypes=["int", "str"])
with pytest.raises(TypeError):
PlainFrame.from_plain(data=[[1, 2],
[1, 2]],
columns=["a", "b"],
dtypes=["int", "bool"])
with pytest.raises(TypeError):
PlainFrame.from_plain(data=[["1", 2],
["1", 2]],
columns=["a", "b"],
dtypes=["float", "int"])
with pytest.raises(TypeError):
PlainFrame.from_plain(data=[["1", 2],
["1", 2]],
columns=["a", "b"],
dtypes=["str", "str"])
with pytest.raises(TypeError):
PlainFrame.from_plain(data=[[True, 2],
[False, 2]],
columns=["a", "b"],
dtypes=["datetime", "int"])
# correct implementation should not raise
PlainFrame.from_plain(data=[[1, 2],
[1, 2]],
columns=["a", "b"],
dtypes=["int", "int"])
def test_plainframe_to_plain():
columns = dtypes = ["int", "float", "bool", "str"]
data = [[1, 1.1, True, "string"],
[2, 2, False, "string2"]]
pf = PlainFrame.from_plain(data=data, columns=columns, dtypes=dtypes)
expected = (data, columns, dtypes)
assert pf.to_plain() == expected
def test_plainframe_from_dict():
data = collections.OrderedDict(
[("col1:int", [1, 2, 3]),
("col2:s", ["a", "b", "c"])]
)
df = PlainFrame.from_dict(data)
# check correct column order and dtypes
np_assert_equal(df.columns, ("col1", "col2"))
np_assert_equal(df.dtypes, ["int", "str"])
# check correct values
np_assert_equal(df.get_column("col1").values, (1, 2, 3))
np_assert_equal(df.get_column("col2").values, ("a", "b", "c"))
def test_plainframe_to_dict():
df = create_plain_frame(["col2:str", "col1:int"], 2)
to_dict = df.to_dict()
keys = list(to_dict.keys())
values = list(to_dict.values())
# check column order and dtypes
np_assert_equal(keys, ["col2:str", "col1:int"])
# check values
np_assert_equal(values[0], ["1", "2"])
np_assert_equal(values[1], [1, 2])
def test_plainframe_from_pandas(df_from_pandas):
df = df_from_pandas
df_conv = PlainFrame.from_pandas(df)
# check int to int
assert df_conv.get_column("int").dtype == "int"
assert df_conv.get_column("int").values == (1, 2)
# check bool to bool
assert df_conv.get_column("bool").dtype == "bool"
assert df_conv.get_column("bool").values == (True, False)
# check bool (object) to bool with nan
assert df_conv.get_column("bool_na").dtype == "bool"
assert df_conv.get_column("bool_na").values == (True, NULL)
# check float to float
assert df_conv.get_column("float").dtype == "float"
assert df_conv.get_column("float").values == (1.2, 1.3)
# check float to float with nan
assert df_conv.get_column("float_na").dtype == "float"
np_assert_equal(df_conv.get_column("float_na").values, (1.2, NaN))
# check str to str
assert df_conv.get_column("str").dtype == "str"
assert df_conv.get_column("str").values == ("foo", "bar")
# check str to str with nan
assert df_conv.get_column("str_na").dtype == "str"
assert df_conv.get_column("str_na").values == ("foo", NULL)
# check datetime to datetime
assert df_conv.get_column("datetime").dtype == "datetime"
assert df_conv.get_column("datetime").values == \
(datetime.datetime(2019, 1, 1), datetime.datetime(2019, 1, 2))
# check datetime to datetime with nan
assert df_conv.get_column("datetime_na").dtype == "datetime"
assert df_conv.get_column("datetime_na").values == (
datetime.datetime(2019, 1, 1), NULL)
def test_plainframe_from_pandas_assertions_missings_cast():
# check mixed dtype raise
df = pd.DataFrame({"mixed": [1, "foo bar"]})
with pytest.raises(TypeError):
PlainFrame.from_pandas(df)
# check assertion for incorrect forces
# too many types provided
with pytest.raises(ValueError):
PlainFrame.from_pandas(df, dtypes=["int", "str"])
with pytest.raises(ValueError):
PlainFrame.from_pandas(df, dtypes={"mixed": "str",
"dummy": "int"})
# invalid dtypes provided
with pytest.raises(ValueError):
PlainFrame.from_pandas(df, dtypes=["not existant type"])
with pytest.raises(ValueError):
PlainFrame.from_pandas(df, dtypes={"mixed": "not existant type"})
# invalid column names provided
with pytest.raises(ValueError):
PlainFrame.from_pandas(df, dtypes={"dummy": "str"})
# check int to forced int with nan
df = pd.DataFrame({"int": [1, np.NaN]})
df_conv = PlainFrame.from_pandas(df, dtypes=["int"])
assert df_conv.get_column("int").dtype == "int"
assert df_conv.get_column("int").values == (1, NULL)
# check force int to float
df = pd.DataFrame({"int": [1, 2]})
df_conv = PlainFrame.from_pandas(df, dtypes=["float"])
assert df_conv.get_column("int").dtype == "float"
assert df_conv.get_column("int").values == (1.0, 2.0)
# check force float to int
df = pd.DataFrame({"float": [1.0, 2.0]})
df_conv = PlainFrame.from_pandas(df, dtypes=["int"])
assert df_conv.get_column("float").dtype == "int"
assert df_conv.get_column("float").values == (1, 2)
# check force str to datetime
df = pd.DataFrame({"datetime": ["2019-01-01", "2019-01-02"]})
df_conv = PlainFrame.from_pandas(df, dtypes=["datetime"])
assert df_conv.get_column("datetime").dtype == "datetime"
assert df_conv.get_column("datetime").values == \
(datetime.datetime(2019, 1, 1), datetime.datetime(2019, 1, 2))
# dtype object with strings and nan should pass correctly
df = pd.DataFrame({"str": ["foo", "bar", NaN]}, dtype=object)
df_conv = PlainFrame.from_pandas(df)
assert df_conv.get_column("str").dtype == "str"
assert df_conv.get_column("str").values == ("foo", "bar", NULL)
def test_plainframe_from_pandas_inspect_dtype():
inspect = ConverterFromPandas.inspect_dtype
# raise if incorrect type
ser = pd.Series("asd", dtype=object)
with pytest.raises(TypeError):
inspect(ser)
def test_plainframe_from_pandas_inspect_dtype_object():
inspect = ConverterFromPandas.inspect_dtype_object
# ensure string with missings
df = pd.DataFrame({"dummy": ["asd", NaN]})
conv = ConverterFromPandas(df)
assert conv.inspect_dtype_object("dummy") == "str"
# check incorrect dtype
df = pd.DataFrame({"dummy": ["asd", tuple([1, 2])]})
conv = ConverterFromPandas(df)
with pytest.raises(TypeError):
conv.inspect_dtype_object("dummy")
def test_plainframe_to_pandas(plainframe_standard):
from pandas.api import types
df = plainframe_standard.to_pandas()
assert types.is_integer_dtype(df["int"])
assert df["int"][0] == 1
assert df["int"].isnull().sum() == 0
assert types.is_float_dtype(df["float"])
assert df["float"].isnull().sum() == 0
assert df["float"][1] == 2.0
assert types.is_bool_dtype(df["bool"])
np_assert_equal(df["bool"][0], True)
assert df["bool"].isnull().sum() == 0
assert | types.is_object_dtype(df["str"]) | pandas.api.types.is_object_dtype |
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 5 13:24:18 2020
@author: earne
"""
from collections import defaultdict
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy import stats
import seaborn as sns
from sipperplots import (
get_any_idi,
get_side_idi,
get_content_idi,
get_chronogram_vals,
preproc_averaging
)
def format_avg_output(output, averaging):
if averaging == 'datetime':
output.index.name = 'Date'
elif averaging == 'time':
first = output.index[0]
output.index = [i - first for i in output.index]
output.index = (output.index.total_seconds()/3600).astype(int)
output.index.name = 'Hours Since {}:00'.format(str(first.hour))
elif averaging == 'elapsed':
output.index = output.index.astype(int)
output.index.name = 'Elapsed Hours'
return output
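# --- Illustrative sketch (not part of the original module) ---
# format_avg_output() only relabels the index of an averaged frame. For example,
# with a DatetimeIndex starting at 09:00 and averaging='time', the index becomes
# whole hours elapsed since that first timestamp:
def _example_format_avg_output():
    idx = pd.date_range('2020-01-01 09:00', periods=3, freq='H')
    df = pd.DataFrame({'LeftCount': [1, 2, 3]}, index=idx)
    out = format_avg_output(df, averaging='time')
    # out.index -> [0, 1, 2], named 'Hours Since 9:00'
    return out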
def drinkcount_cumulative(sipper, show_left=True, show_right=True,
show_content=[], **kwargs):
output = pd.DataFrame()
df = sipper.data
if 'date_filter' in kwargs:
s, e = kwargs['date_filter']
df = df[(df.index >= s) &
(df.index <= e)].copy()
if show_left:
l = pd.DataFrame({'LeftCount' : df['LeftCount']}, index=df.index)
output = output.join(l, how='outer')
if show_right:
r = pd.DataFrame({'RightCount' : df['RightCount']}, index=df.index)
output = output.join(r, how='outer')
if show_content:
for c in show_content:
count = sipper.get_content_values(c, out='Count', df=df)
if not count.empty:
temp = pd.DataFrame({c +'Count' : count}, index=count.index)
output = output.join(temp, how='outer')
return output
def drinkcount_binned(sipper, binsize='1H', show_left=True, show_right=True,
show_content=[], **kwargs):
output = pd.DataFrame()
df = sipper.data
if 'date_filter' in kwargs:
s, e = kwargs['date_filter']
df = df[(df.index >= s) &
(df.index <= e)].copy()
base = df.index[0].hour
if show_left:
binned = df['LeftCount'].diff().resample(binsize, base=base).sum()
l = pd.DataFrame({'LeftCount' : binned}, index=binned.index)
output = output.join(l, how='outer')
if show_right:
binned = df['RightCount'].diff().resample(binsize, base=base).sum()
r = pd.DataFrame({'RightCount' : binned}, index=binned.index)
output = output.join(r, how='outer')
if show_content:
for c in show_content:
count = sipper.get_content_values(c, out='Count', df=df)
binned = count.diff().resample(binsize, base=base).sum()
if not count.empty:
temp = pd.DataFrame({c+'Count' : binned}, index=binned.index)
output = output.join(temp, how='outer')
return output
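# Hedged sketch (invented data, not part of the original module): the device
# counters are cumulative, so the *_binned functions above derive per-bin totals
# with diff() followed by resample(...).sum(). They anchor bins with the older
# resample `base=` keyword; newer pandas replaces it with `offset=`/`origin=`,
# which is an assumption to check against the installed version.
def _example_binned_counts():
    idx = pd.date_range('2020-08-05 13:00', periods=6, freq='30T')
    cumulative = pd.Series([0, 2, 5, 5, 9, 12], index=idx, name='LeftCount')
    return cumulative.diff().resample('1H').sum()  # hourly counts: 2.0, 3.0, 7.0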
def drinkduration_cumulative(sipper, show_left=True, show_right=True,
show_content=[], **kwargs):
output = pd.DataFrame()
df = sipper.data
if 'date_filter' in kwargs:
s, e = kwargs['date_filter']
df = df[(df.index >= s) &
(df.index <= e)].copy()
if show_left:
l = pd.DataFrame({'LeftDuration' : df['LeftDuration']}, index=df.index)
output = output.join(l, how='outer')
if show_right:
r = pd.DataFrame({'RightDuration' : df['RightDuration']}, index=df.index)
output = output.join(r, how='outer')
if show_content:
for c in show_content:
count = sipper.get_content_values(c, out='Count', df=df)
if not count.empty:
temp = pd.DataFrame({c+'Duration' : count}, index=count.index)
output = output.join(temp, how='outer')
return output
def drinkduration_binned(sipper, binsize='1H', show_left=True, show_right=True,
show_content=[], **kwargs):
output = pd.DataFrame()
df = sipper.data
if 'date_filter' in kwargs:
s, e = kwargs['date_filter']
df = df[(df.index >= s) &
(df.index <= e)].copy()
base = df.index[0].hour
if show_left:
binned = df['LeftDuration'].diff().resample(binsize, base=base).sum()
l = pd.DataFrame({'LeftDuration' : binned}, index=binned.index)
output = output.join(l, how='outer')
if show_right:
binned = df['RightDuration'].diff().resample(binsize, base=base).sum()
r = pd.DataFrame({'RightDuration' : binned}, index=binned.index)
output = output.join(r, how='outer')
if show_content:
for c in show_content:
count = sipper.get_content_values(c, out='Count', df=df)
binned = count.diff().resample(binsize, base=base).sum()
if not count.empty:
temp = pd.DataFrame({c+'Duration' : binned}, index=binned.index)
output = output.join(temp, how='outer')
return output
def interdrink_intervals(sippers, kde=True, logx=True,
combine=False, **kwargs):
if combine:
output = idi_onecurve(sippers, kde, logx, **kwargs)
else:
output = idi_multicurve(sippers, kde, logx, **kwargs)
return output
def idi_onecurve(sippers, kde, logx, **kwargs):
bar_df = pd.DataFrame()
kde_df = pd.DataFrame()
combined = []
for sipper in sippers:
fig = plt.figure()
plt.clf()
df = sipper.data.copy()
if 'date_filter' in kwargs:
s, e = kwargs['date_filter']
df = df[(df.index >= s) &
(df.index <= e)].copy()
y = get_any_idi(sipper)
if logx:
y = [np.log10(val) for val in y if not pd.isna(val) if val != 0]
bins = np.round(np.arange(-2, 5, .1), 2)
else:
bins = np.linspace(0, 900, 50)
combined += list(y)
plot = sns.distplot(combined, bins=bins, norm_hist=False, kde=kde)
if kde:
if plot.get_lines():
line = plot.get_lines()[0]
x, y = line.get_data()
kde_df = kde_df.reindex(x)
kde_df['Values'] = y
bar_x = [v.get_x() for v in plot.patches]
bar_h = [v.get_height() for v in plot.patches]
bar_df = bar_df.reindex(bar_x)
bar_df['Values'] = bar_h
bar_df.index.name = 'log10(minutes)' if logx else 'minutes'
kde_df.index.name = 'log10(minutes)' if logx else 'minutes'
plt.close()
return bar_df, kde_df
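# Note (hedged): the histogram heights and KDE curve above are recovered from the
# seaborn Axes itself (ax.patches for the bars, ax.get_lines()[0] for the KDE)
# because sns.distplot returns an Axes rather than the underlying arrays.
# distplot is deprecated in seaborn >= 0.11 in favour of histplot/displot, so
# this extraction pattern assumes an older seaborn release.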
def idi_multicurve(sippers, kde, logx, **kwargs):
bar_df = pd.DataFrame()
kde_df = pd.DataFrame()
for sipper in sippers:
fig = plt.figure()
plt.clf()
df = sipper.data.copy()
if 'date_filter' in kwargs:
s, e = kwargs['date_filter']
df = df[(df.index >= s) &
(df.index <= e)].copy()
y = get_any_idi(sipper)
if logx:
y = [np.log10(val) for val in y if not pd.isna(val) if val != 0]
bins = np.round(np.arange(-2, 5, .1), 2)
else:
bins = np.linspace(0, 900, 50)
plot = sns.distplot(y, bins=bins, norm_hist=False, kde=kde)
bar_x = [v.get_x() for v in plot.patches]
bar_h = [v.get_height() for v in plot.patches]
btemp = pd.DataFrame({sipper.filename : bar_h}, index=bar_x)
bar_df = bar_df.join(btemp, how='outer')
if kde:
if plot.get_lines():
line = plot.get_lines()[0]
x, y = line.get_data()
ktemp = pd.DataFrame({sipper.filename : y}, index=x)
kde_df = kde_df.join(ktemp, how='outer')
plt.close()
bar_df.index.name = 'log10(minutes)' if logx else 'minutes'
kde_df.index.name = 'log10(minutes)' if logx else 'minutes'
return bar_df, kde_df
def interdrink_intervals_byside(sippers, kde=True, logx=True, **kwargs):
bar_df = pd.DataFrame()
kde_df = pd.DataFrame()
for side in ['Left', 'Right']:
combined = []
fig = plt.figure()
plt.clf()
for sipper in sippers:
df = sipper.data.copy()
if 'date_filter' in kwargs:
s, e = kwargs['date_filter']
df = df[(df.index >= s) &
(df.index <= e)].copy()
y = get_side_idi(sipper, side)
if logx:
y = [np.log10(val) for val in y if not pd.isna(val) if val != 0]
bins = np.round(np.arange(-2, 5, .1), 2)
else:
bins = np.linspace(0, 900, 50)
combined += list(y)
plot = sns.distplot(combined, bins=bins, norm_hist=False, kde=kde)
if kde:
if plot.get_lines():
line = plot.get_lines()[0]
x, y = line.get_data()
ktemp = pd.DataFrame({side:y}, index=x)
kde_df = kde_df.join(ktemp, how='outer')
bar_x = [v.get_x() for v in plot.patches]
bar_h = [v.get_height() for v in plot.patches]
btemp = pd.DataFrame({side:bar_h}, index=bar_x)
bar_df = bar_df.join(btemp, how='outer')
plt.close()
bar_df.index.name = 'log10(minutes)' if logx else 'minutes'
kde_df.index.name = 'log10(minutes)' if logx else 'minutes'
return bar_df, kde_df
def interdrink_intervals_bycontent(sippers, idi_content, kde=True, logx=True,
**kwargs):
bar_df = pd.DataFrame()
kde_df = pd.DataFrame()
for c in idi_content:
combined = []
fig = plt.figure()
plt.clf()
for sipper in sippers:
df = sipper.data.copy()
if 'date_filter' in kwargs:
s, e = kwargs['date_filter']
df = df[(df.index >= s) &
(df.index <= e)].copy()
y = get_content_idi(sipper, c, df=df)
if logx:
y = [np.log10(val) for val in y if not pd.isna(val) if val != 0]
bins = np.round(np.arange(-2, 5, .1), 2)
else:
bins = np.linspace(0, 900, 50)
combined += list(y)
plot = sns.distplot(combined, bins=bins, norm_hist=False, kde=kde)
if kde:
if plot.get_lines():
line = plot.get_lines()[0]
x, y = line.get_data()
ktemp = pd.DataFrame({c:y}, index=x)
kde_df = kde_df.join(ktemp, how='outer')
bar_x = [v.get_x() for v in plot.patches]
bar_h = [v.get_height() for v in plot.patches]
btemp = pd.DataFrame({c:bar_h}, index=bar_x)
bar_df = bar_df.join(btemp, how='outer')
plt.close()
bar_df.index.name = 'log10(minutes)' if logx else 'minutes'
kde_df.index.name = 'log10(minutes)' if logx else 'minutes'
return bar_df, kde_df
def drinkcount_chronogram(sipper, circ_left=True, circ_right=True,
circ_content=None, lights_on=7,
lights_off=19, **kwargs):
output = pd.DataFrame()
df = sipper.data
if 'date_filter' in kwargs:
s, e = kwargs['date_filter']
df = df[(df.index >= s) &
(df.index <= e)].copy()
to_plot = []
labels = []
if circ_left:
to_plot.append(df['LeftCount'])
labels.append('Left')
if circ_right:
to_plot.append(df['RightCount'])
labels.append('Right')
if circ_content:
for c in circ_content:
vals = sipper.get_content_values(c, 'Count', df=df)
if not vals.empty:
                to_plot.append(vals)
labels.append(c)
for i, series in enumerate(to_plot):
reindexed = get_chronogram_vals(series, lights_on, lights_off)
if reindexed.empty:
continue
label = labels[i]
temp = pd.DataFrame({label:reindexed}, index=reindexed.index)
output = output.join(temp, how='outer')
output.index.name = 'Hours Into Light Cycle'
return output
def drinkcount_chronogram_grouped(sippers, groups, circ_left=True, circ_right=True,
circ_content=None, circ_var='SEM', lights_on=7,
lights_off=19, **kwargs):
output = pd.DataFrame(index=range(0,24))
output.index.name = 'Hours Into Light Cycle'
to_plot = defaultdict(list)
for group in groups:
for sipper in sippers:
if group in sipper.groups:
df = sipper.data
if 'date_filter' in kwargs:
s, e = kwargs['date_filter']
df = df[(df.index >= s) &
(df.index <= e)].copy()
if circ_left:
key = group + ' - Left'
vals = get_chronogram_vals(df['LeftCount'],
lights_on,
lights_off)
vals.name = sipper.basename
to_plot[key].append(vals)
if circ_right:
key = group + ' - Right'
vals = get_chronogram_vals(df['RightCount'],
lights_on,
lights_off)
vals.name = sipper.basename
to_plot[key].append(vals)
if circ_content:
for c in circ_content:
key = group + ' - ' + c
content_vals = sipper.get_content_values(c, 'Count', df)
if not content_vals.empty:
vals = get_chronogram_vals(content_vals,
lights_on,
lights_off)
vals.name = sipper.basename
to_plot[key].append(vals)
for i, (label, data) in enumerate(to_plot.items()):
y = np.nanmean(data, axis=0)
for d in data:
output[label + ' - ' + d.name] = d
output[label + ' MEAN'] = y
if circ_var == 'SEM':
sem = stats.sem(data, axis=0, nan_policy='omit')
output[label + ' SEM'] = sem
elif circ_var == 'STD':
std = np.nanstd(data, axis=0)
output[label + ' STD'] = std
return output
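# Note (hedged): for each group the loop above writes the per-animal chronogram
# columns, their mean, and an error column chosen by circ_var: SEM uses
# scipy.stats.sem (standard deviation / sqrt(n), with nan_policy='omit' so
# missing hours are ignored), while STD uses np.nanstd across animals.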
def drinkduration_chronogram(sipper, circ_left=True, circ_right=True,
circ_content=None, lights_on=7,
lights_off=19, **kwargs):
output = pd.DataFrame()
df = sipper.data
if 'date_filter' in kwargs:
s, e = kwargs['date_filter']
df = df[(df.index >= s) &
(df.index <= e)].copy()
to_plot = []
labels = []
if circ_left:
to_plot.append(df['LeftDuration'])
labels.append('Left')
if circ_right:
to_plot.append(df['RightDuration'])
labels.append('Right')
if circ_content:
for c in circ_content:
vals = sipper.get_content_values(c, 'Duration', df=df)
if not vals.empty:
                to_plot.append(vals)
labels.append(c)
for i, series in enumerate(to_plot):
reindexed = get_chronogram_vals(series, lights_on, lights_off)
if reindexed.empty:
continue
label = labels[i]
temp = pd.DataFrame({label:reindexed}, index=reindexed.index)
output = output.join(temp, how='outer')
output.index.name = 'Hours Into Light Cycle'
return output
def drinkduration_chronogram_grouped(sippers, groups, circ_left=True, circ_right=True,
circ_content=None, circ_var='SEM', lights_on=7,
lights_off=19, **kwargs):
output = pd.DataFrame(index=range(0,24))
output.index.name = 'Hours Into Light Cycle'
to_plot = defaultdict(list)
for group in groups:
for sipper in sippers:
if group in sipper.groups:
df = sipper.data
if 'date_filter' in kwargs:
s, e = kwargs['date_filter']
df = df[(df.index >= s) &
(df.index <= e)].copy()
if circ_left:
key = group + ' - Left'
vals = get_chronogram_vals(df['LeftDuration'],
lights_on,
lights_off)
vals.name = sipper.basename
to_plot[key].append(vals)
if circ_right:
key = group + ' - Right'
vals = get_chronogram_vals(df['RightDuration'],
lights_on,
lights_off)
vals.name = sipper.basename
to_plot[key].append(vals)
if circ_content:
for c in circ_content:
key = group + ' - ' + c
content_vals = sipper.get_content_values(c, 'Duration', df)
if not content_vals.empty:
vals = get_chronogram_vals(content_vals,
lights_on,
lights_off)
vals.name = sipper.basename
to_plot[key].append(vals)
for i, (label, data) in enumerate(to_plot.items()):
y = np.nanmean(data, axis=0)
for d in data:
output[label + ' - ' + d.name] = d
output[label + ' MEAN'] = y
if circ_var == 'SEM':
sem = stats.sem(data, axis=0, nan_policy='omit')
output[label + ' SEM'] = sem
elif circ_var == 'STD':
std = np.nanstd(data, axis=0)
output[label + ' STD'] = std
return output
def side_preference(sipper, pref_side='Left', pref_metric='Count', pref_bins='1H',
**kwargs):
df = sipper.data
if 'date_filter' in kwargs:
s, e = kwargs['date_filter']
df = df[(df.index >= s) &
(df.index <= e)].copy()
base = df.index[0].hour
lcol = 'Left' + pref_metric
rcol = 'Right' + pref_metric
l_data = df[lcol].diff().resample(pref_bins, base=base).sum()
r_data = df[rcol].diff().resample(pref_bins, base=base).sum()
total = l_data + r_data
if pref_side == 'Left':
preference = l_data/total
else:
preference = r_data/total
preference *= 100
output = pd.DataFrame(preference)
output.columns = ['{} Preference (% Drink {})'.format(pref_side, pref_metric)]
return output
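# Hedged usage sketch (the `sip` object is assumed to be a loaded Sipper and is
# not defined in this module): side_preference() returns, per time bin, the
# percentage of drinks (or duration) on the preferred side, i.e.
# left / (left + right) * 100 when pref_side='Left'; 30 left and 10 right
# counts in a bin give 75.0.
# prefs = side_preference(sip, pref_side='Left', pref_metric='Count', pref_bins='1H')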
def content_preference(sipper, pref_content=[], pref_metric='Count', pref_bins='1H',
lights_on=7, lights_off=19, shade_dark=True, **kwargs):
output = pd.DataFrame()
df = sipper.data
if 'date_filter' in kwargs:
s, e = kwargs['date_filter']
df = df[(df.index >= s) &
(df.index <= e)].copy()
base = df.index[0].hour
for i, c in enumerate(pref_content):
target = sipper.get_content_values(c, out=pref_metric, df=df)
if target.empty:
continue
target = target.diff().resample(pref_bins, base=base).sum()
other = sipper.get_content_values(c, out=pref_metric, df=df,
opposite=True)
other = other.diff().resample(pref_bins, base=base).sum()
if not target.empty and not other.empty:
preference = target / (target + other) * 100
temp = pd.DataFrame({c : preference}, index=preference.index)
output = output.join(temp, how='outer')
return output
def averaged_drinkcount(sippers, groups, averaging='datetime', avg_bins='1H',
avg_var='SEM', show_left=True, show_right=True,
show_content=[], **kwargs):
output = pd.DataFrame()
to_plot = defaultdict(list)
for group in groups:
for sipper in sippers:
if group in sipper.groups:
df = sipper.data
if 'date_filter' in kwargs:
s, e = kwargs['date_filter']
df = df[(df.index >= s) &
(df.index <= e)].copy()
if show_left:
key = '{} - Left'.format(group)
vals = df['LeftCount'].diff().rename(sipper.basename)
to_plot[key].append(vals)
if show_right:
key = '{} - Right'.format(group)
vals = df['RightCount'].diff().rename(sipper.basename)
to_plot[key].append(vals)
if show_content:
for c in show_content:
key = '{} - {}'.format(group, c)
vals = sipper.get_content_values(c, out='Count',
df=df).diff()
if not vals.empty:
to_plot[key].append(vals.rename(sipper.basename))
for i, (label, data) in enumerate(to_plot.items()):
temp = pd.DataFrame()
processed = preproc_averaging(data, averaging=averaging,
avg_bins=avg_bins, agg='sum')
x = processed['x']
ys = processed['ys']
mean = np.nanmean(ys, axis=0)
temp = temp.reindex(x)
for y in ys:
temp['{} ({})'.format(y.name, label)] = y
temp['{} MEAN'.format(label)] = mean
if avg_var == 'SEM':
temp['{} SEM'.format(label)] = stats.sem(ys, axis=0, nan_policy='omit')
elif avg_var == 'STD':
temp['{} STD'.format(label)] = np.nanstd(ys, axis=0)
output = output.join(temp, how='outer')
return format_avg_output(output, averaging)
def cumulative_averaged_drinkcount(sippers, groups, avg_bins='1H',
avg_var='SEM', show_left=True, show_right=True,
show_content=[], **kwargs):
averaging = 'elapsed'
output = pd.DataFrame()
to_plot = defaultdict(list)
for group in groups:
for sipper in sippers:
if group in sipper.groups:
df = sipper.data
if 'date_filter' in kwargs:
s, e = kwargs['date_filter']
df = df[(df.index >= s) &
(df.index <= e)].copy()
if show_left:
key = '{} - Left'.format(group)
vals = df['LeftCount'].rename(sipper.basename)
to_plot[key].append(vals)
if show_right:
key = '{} - Right'.format(group)
vals = df['RightCount'].rename(sipper.basename)
to_plot[key].append(vals)
if show_content:
for c in show_content:
key = '{} - {}'.format(group, c)
vals = sipper.get_content_values(c, out='Count',
df=df)
if not vals.empty:
to_plot[key].append(vals.rename(sipper.basename))
for i, (label, data) in enumerate(to_plot.items()):
temp = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Retrieve bikeshare trips data."""
# pylint: disable=invalid-name
import os
import re
from glob import glob
from typing import Dict, List
from zipfile import ZipFile
import pandas as pd
import pandera as pa
import requests
from src.utils import log_prefect
trips_schema = pa.DataFrameSchema(
columns={
"TRIP_ID": pa.Column(pa.Int),
"TRIP__DURATION": pa.Column(pa.Int),
"START_STATION_ID": pa.Column(
pa.Int,
nullable=True,
),
"START_TIME": pa.Column(
pa.Timestamp,
checks=[pa.Check(lambda s: s.dt.year.isin([2021, 2022]))],
),
"START_STATION_NAME": pa.Column(pd.StringDtype()),
"END_STATION_ID": pa.Column(
pa.Int,
nullable=True,
),
"END_TIME": pa.Column(
pa.Timestamp,
checks=[pa.Check(lambda s: s.dt.year.isin([2021, 2022]))],
),
"END_STATION_NAME": pa.Column(pd.StringDtype()),
"BIKE_ID": pa.Column(pa.Int, nullable=True),
"USER_TYPE": pa.Column(
pd.StringDtype(),
checks=[
pa.Check(
lambda s: s.isin(["Annual Member", "Casual Member"]),
)
],
),
},
index=pa.Index(pa.Int),
)
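# Hedged illustration (helper name and input frame are invented, not part of the
# original pipeline): a pandera DataFrameSchema such as trips_schema is applied
# with .validate(), which returns the DataFrame when every column dtype and check
# passes and raises pandera.errors.SchemaError otherwise.
def _example_validate_trips(trips: pd.DataFrame) -> pd.DataFrame:
    # Raises on a failing check, e.g. a START_TIME outside 2021/2022.
    return trips_schema.validate(trips)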
urls_schema = pa.DataFrameSchema(
columns={
"url": pa.Column(pd.StringDtype()),
"name": pa.Column(pd.StringDtype()),
"format": pa.Column(pd.StringDtype()),
"state": pa.Column(pd.StringDtype()),
},
index=pa.Index(pa.Int),
)
get_data_status_schema = pa.DataFrameSchema(
columns={
"trips_file_name": pa.Column(pd.StringDtype()),
"last_modified_opendata": pa.Column(
pd.DatetimeTZDtype(tz="America/Toronto")
),
"parquet_file_exists": pa.Column( | pd.BooleanDtype() | pandas.BooleanDtype |
import os
import pathlib
from typing import Sequence, Union, List, Callable
import matplotlib
import matplotlib.cm
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
import pandas as pd
from pandas.tseries.offsets import MonthEnd
from covid_data import get_ifr, scrape_and_combine
from utils_pandas import cum2daily, cut_ages, decreasing, get_cycle, human_format, import_csv, increasing, normalise_to_total, \
rearrange, set_time_series_labels_2, topprov
from utils_scraping import remove_prefix, remove_suffix
from utils_thai import DISTRICT_RANGE, DISTRICT_RANGE_SIMPLE, AREA_LEGEND, AREA_LEGEND_SIMPLE, \
AREA_LEGEND_ORDERED, FIRST_AREAS, area_crosstab, get_provinces, join_provinces, thaipop
def plot_area(df: pd.DataFrame,
png_prefix: str,
cols_subset: Union[str, Sequence[str]],
title: str,
footnote: str = None,
legends: List[str] = None,
legend_pos: str = None,
legend_cols: int = 1,
kind: str = 'line',
stacked=False,
percent_fig: bool = False,
unknown_name: str = 'Unknown',
unknown_total: str = None,
unknown_percent=False,
ma_days: int = None,
cmap: str = 'tab20',
periods_to_plot=None,
actuals: List[str] = [],
highlight: List[str] = [],
box_cols: List[str] = [],
reverse_cmap: bool = False,
y_formatter: Callable[[float, int], str] = human_format,
clean_end=True,
between: List[str] = []) -> None:
"""Creates one .png file for several time periods, showing data in absolute numbers and percentage terms.
:param df: data frame containing all available data
:param png_prefix: file prefix (file suffix is '.png')
:param cols_subset: specify columns from the pandas DataFrame based on either a column name prefix or based on a
list of column names.
:param title: plot title
:param legends: legends to be used on the plots (line chart and percentage)
:param kind: the type of plot (line chart or area chart)
:param stacked: whether the line chart should use stacked lines
:param percent_fig: whether the percentage chart should be included
:param unknown_name: the column name containing data related to unknowns
:param unknown_total: the column name (to be created) with unknown totals
:param unknown_percent: to include unknowns in a percentage fig if enabled
:param ma_days: number of days used when computing the moving average
:param cmap: the matplotlib colormap to be used
:param reverse_cmap: whether the colormap should be reversed
:param highlight: cols to make thicker to highlight them
:param y_formatter: function to format y axis numbers
:param clean_end: remove days at end if there is no data (inc unknown)
:param between: columns to display as dashed
:param actuals: display non MA as dashed
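    :param footnote: optional note annotated below the bottom right of the plot
    :param legend_pos: legend location (passed through to the matplotlib legend loc)
    :param legend_cols: number of columns used for legend entries
    :param periods_to_plot: subset of period keys to render (e.g. 'all', '3', '30d')
    :param box_cols: column group(s) drawn as a shaded min-max band with a dashed mean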
"""
if type(cols_subset) is str:
cols = [c for c in df.columns if str(c).startswith(cols_subset)]
else:
cols = cols_subset
orig_cols = cols
plt.rcParams.update({
"font.size": 24,
"figure.titlesize": 30,
"figure.titleweight": "bold",
"axes.titlesize": 28,
"legend.fontsize": 24,
"axes.prop_cycle": get_cycle(cmap),
})
if len(cols) > 6:
plt.rcParams.update({"legend.fontsize": 18})
if actuals:
        # display the originals dashed alongside the MA
if type(actuals) != list:
actuals = cols
else:
actuals = []
if ma_days:
ma_suffix = ' (MA)'
for c in cols:
df[f'{c}{ma_suffix}'] = df[c].rolling(ma_days, min_periods=int(ma_days / 2), center=True).mean()
cols = [f'{c}{ma_suffix}' for c in cols]
else:
ma_suffix = ''
# try to hone in on last day of "important" data. Assume first col
last_update = df[orig_cols[:1]].dropna().last_valid_index() # date format chosen: '05 May 2021'
# last_date_excl = df[cols].last_valid_index() # last date with some data (not inc unknown)
is_dates = hasattr(last_update, 'date')
if unknown_total:
if ma_days:
df[f'{unknown_total}{ma_suffix}'] = df[unknown_total].rolling(ma_days,
min_periods=int(ma_days / 2),
center=True).mean()
total_col = f'{unknown_total}{ma_suffix}'
unknown_col = f'{unknown_name}{ma_suffix}'
other_cols = set(cols) - set([unknown_col])
# TODO: should not be 0 when no unknown_total
df[unknown_col] = df[total_col].sub(df[other_cols].sum(axis=1), fill_value=None).clip(lower=0)
if unknown_col not in cols:
cols = cols + [unknown_col]
if percent_fig:
perccols = [
c for c in cols
if (not unknown_total or unknown_percent or c != unknown_col) and c not in (between + actuals)
]
for c in perccols:
df[f'{c} (%)'] = df[f'{c}'] / df[perccols].sum(axis=1) * 100
if unknown_total and not unknown_percent:
df[f'{unknown_name}{ma_suffix} (%)'] = 0
perccols = [f'{c} (%)' for c in perccols]
title = f'{title}\n'
if ma_days:
title = title + f'({ma_days} day rolling average) '
if is_dates:
title += f"Last Data: {last_update.date().strftime('%d %b %Y')}\n"
else:
title += f"Last Data: {last_update}\n"
title += 'https://djay.github.io/covidthailand - (CC BY)'
    # if legends are not specified, use the column names; otherwise use the names passed in the 'legends' argument
if legends is None:
legends = [remove_suffix(c, ma_suffix) for c in cols]
if unknown_total and unknown_name not in legends:
legends = legends + [unknown_name]
# if unknown_total:
# colormap = custom_cm(cmap, len(cols) + 1, 'lightgrey', flip=reverse_cmap)
# else:
# colormap = custom_cm(cmap, len(cols), flip=reverse_cmap)
# colormap = cmap
# drop any rows containing 'NA' if they are in the specified columns (=subset of all columns)
# df_clean = clip_dataframe(df_all=df, cols=cols, n_rows=10)
last_date_unknown = df[cols].last_valid_index() # last date with some data (inc unknown)
if clean_end:
df_clean = df.loc[:last_date_unknown]
else:
df_clean = df
if is_dates:
periods = {
'all': df_clean,
'1': df_clean[:'2020-06-01'],
'2': df_clean['2020-12-12':],
'3': df_clean['2021-04-01':],
'30d': df_clean.last('30d')
}
quick = os.environ.get('USE_CACHE_DATA', False) == 'True' # TODO: have its own switch
if periods_to_plot:
pass
elif quick:
periods_to_plot = ['all']
else:
periods_to_plot = set(periods.keys())
periods = {key: periods[key] for key in periods_to_plot}
else:
periods = {'all': df_clean}
for suffix, df_plot in periods.items():
if df_plot.empty:
continue
if percent_fig:
f, (a0, a1) = plt.subplots(2, 1, gridspec_kw={'height_ratios': [4, 2]}, figsize=[20, 15])
else:
f, a0 = plt.subplots(figsize=[20, 12])
# plt.rcParams["axes.prop_cycle"] = get_cycle(colormap)
a0.set_prop_cycle(None)
if y_formatter is not None:
a0.yaxis.set_major_formatter(FuncFormatter(y_formatter))
areacols = [c for c in cols if c not in between]
if kind != "line":
df_plot.plot(ax=a0, y=areacols, kind=kind, stacked=stacked, legend='reverse')
linecols = between + actuals
else:
linecols = cols + actuals
# advance colour cycle so lines have correct next colour
for _ in range(len(areacols)):
next(a0._get_lines.prop_cycler)
for c in linecols:
style = "--" if c in [f"{b}{ma_suffix}" for b in between] + actuals else None
width = 5 if c in [f"{h}{ma_suffix}" for h in highlight] else None
df_plot.plot(ax=a0,
y=c,
use_index=True,
linewidth=width,
style=style,
kind="line",
zorder=4,
legend=c not in actuals,
x_compat=kind == 'bar' # Putting lines on bar plots doesn't work well
)
if box_cols and type(box_cols[0]) != list:
box_cols = [box_cols]
elif not box_cols:
box_cols = []
for dist in box_cols:
mins, maxes, avg = df_plot[dist].min(axis=1), df_plot[dist].max(axis=1), df_plot[dist].mean(axis=1)
a0.fill_between(df.index, mins, maxes, facecolor="orange", alpha=0.3, zorder=3, label=None, step=None)
avg.plot(ax=a0, color="orange", style="--", zorder=5, x_compat=kind == 'bar', legend=False)
# boxes = df_plot[box_cols].transpose()
# boxes.boxplot(ax=a0)
if kind == "bar" and is_dates:
set_time_series_labels_2(df_plot, a0)
a0.set_title(label=title)
if footnote:
plt.annotate(footnote, (0.99, 0), (0, -50),
xycoords='axes fraction',
textcoords='offset points',
va='top',
fontsize=15,
horizontalalignment='right')
handles, labels = a0.get_legend_handles_labels()
        # we skip pandas' automatic legend-entry selection and pick entries manually; box lines show up as 'None'
# TODO: go back to pandas doing it.
handles, labels = zip(*[(h, l) for h, l in zip(*a0.get_legend_handles_labels()) if l not in actuals + ['None']])
leg = a0.legend(handles=handles,
labels=legends,
loc=legend_pos,
frameon=True,
edgecolor="black",
fancybox=True,
framealpha=0.5,
ncol=legend_cols)
for line in leg.get_lines():
line.set_linewidth(4.0)
if unknown_total:
a0.set_ylabel(unknown_total)
a0.xaxis.label.set_visible(False)
if percent_fig:
a1.set_prop_cycle(None)
df_plot.plot(ax=a1, y=perccols, kind='area', legend=False)
a1.set_ylabel('Percent')
a1.xaxis.label.set_visible(False)
plt.tight_layout()
path = os.path.join("outputs", f'{png_prefix}_{suffix}.png')
plt.savefig(path)
print("Plot:", path)
plt.close()
return None
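# Hedged usage sketch (argument values are illustrative only, not taken from this
# file):
# plot_area(df, png_prefix='cases_causes', cols_subset='Cases', title='Cases by source',
#           kind='area', stacked=True, percent_fig=True, ma_days=7, cmap='tab20')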
def save_plots(df: pd.DataFrame) -> None:
print('======== Generating Plots ==========')
# matplotlib global settings
matplotlib.use('AGG')
plt.style.use('seaborn-whitegrid')
# create directory if it does not exists
pathlib.Path('./outputs').mkdir(parents=True, exist_ok=True)
# Computed data
# TODO: has a problem if we have local transmission but no proactive
# TODO: put somewhere else
walkins = | pd.DataFrame(df["Cases Local Transmission"] - df["Cases Proactive"], columns=['Cases Walkin']) | pandas.DataFrame |
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas.core.dtypes.generic import ABCIndexClass
import pandas as pd
import pandas._testing as tm
from pandas.api.types import is_float, is_float_dtype, is_integer, is_scalar
from pandas.core.arrays import IntegerArray, integer_array
from pandas.core.arrays.integer import (
Int8Dtype,
Int16Dtype,
Int32Dtype,
Int64Dtype,
UInt8Dtype,
UInt16Dtype,
UInt32Dtype,
UInt64Dtype,
)
from pandas.tests.extension.base import BaseOpsUtil
def make_data():
return list(range(8)) + [np.nan] + list(range(10, 98)) + [np.nan] + [99, 100]
@pytest.fixture(
params=[
Int8Dtype,
Int16Dtype,
Int32Dtype,
Int64Dtype,
UInt8Dtype,
UInt16Dtype,
UInt32Dtype,
UInt64Dtype,
]
)
def dtype(request):
return request.param()
@pytest.fixture
def data(dtype):
return integer_array(make_data(), dtype=dtype)
@pytest.fixture
def data_missing(dtype):
return integer_array([np.nan, 1], dtype=dtype)
@pytest.fixture(params=["data", "data_missing"])
def all_data(request, data, data_missing):
"""Parametrized fixture giving 'data' and 'data_missing'"""
if request.param == "data":
return data
elif request.param == "data_missing":
return data_missing
def test_dtypes(dtype):
# smoke tests on auto dtype construction
if dtype.is_signed_integer:
assert np.dtype(dtype.type).kind == "i"
else:
assert np.dtype(dtype.type).kind == "u"
assert dtype.name is not None
@pytest.mark.parametrize(
"dtype, expected",
[
(Int8Dtype(), "Int8Dtype()"),
(Int16Dtype(), "Int16Dtype()"),
(Int32Dtype(), "Int32Dtype()"),
(Int64Dtype(), "Int64Dtype()"),
(UInt8Dtype(), "UInt8Dtype()"),
(UInt16Dtype(), "UInt16Dtype()"),
(UInt32Dtype(), "UInt32Dtype()"),
(UInt64Dtype(), "UInt64Dtype()"),
],
)
def test_repr_dtype(dtype, expected):
assert repr(dtype) == expected
def test_repr_array():
result = repr(integer_array([1, None, 3]))
expected = "<IntegerArray>\n[1, <NA>, 3]\nLength: 3, dtype: Int64"
assert result == expected
def test_repr_array_long():
data = integer_array([1, 2, None] * 1000)
expected = (
"<IntegerArray>\n"
"[ 1, 2, <NA>, 1, 2, <NA>, 1, 2, <NA>, 1,\n"
" ...\n"
" <NA>, 1, 2, <NA>, 1, 2, <NA>, 1, 2, <NA>]\n"
"Length: 3000, dtype: Int64"
)
result = repr(data)
assert result == expected
class TestConstructors:
def test_uses_pandas_na(self):
a = pd.array([1, None], dtype=pd.Int64Dtype())
assert a[1] is pd.NA
def test_from_dtype_from_float(self, data):
# construct from our dtype & string dtype
dtype = data.dtype
# from float
expected = pd.Series(data)
result = pd.Series(
data.to_numpy(na_value=np.nan, dtype="float"), dtype=str(dtype)
)
tm.assert_series_equal(result, expected)
# from int / list
expected = pd.Series(data)
result = pd.Series(np.array(data).tolist(), dtype=str(dtype))
tm.assert_series_equal(result, expected)
# from int / array
expected = pd.Series(data).dropna().reset_index(drop=True)
dropped = np.array(data.dropna()).astype(np.dtype((dtype.type)))
result = pd.Series(dropped, dtype=str(dtype))
tm.assert_series_equal(result, expected)
class TestArithmeticOps(BaseOpsUtil):
def _check_divmod_op(self, s, op, other, exc=None):
super()._check_divmod_op(s, op, other, None)
def _check_op(self, s, op_name, other, exc=None):
op = self.get_op_from_name(op_name)
result = op(s, other)
# compute expected
mask = s.isna()
# if s is a DataFrame, squeeze to a Series
# for comparison
if isinstance(s, pd.DataFrame):
result = result.squeeze()
s = s.squeeze()
mask = mask.squeeze()
# other array is an Integer
if isinstance(other, IntegerArray):
omask = getattr(other, "mask", None)
mask = getattr(other, "data", other)
if omask is not None:
mask |= omask
# 1 ** na is na, so need to unmask those
if op_name == "__pow__":
mask = np.where(~s.isna() & (s == 1), False, mask)
elif op_name == "__rpow__":
other_is_one = other == 1
if isinstance(other_is_one, pd.Series):
other_is_one = other_is_one.fillna(False)
mask = np.where(other_is_one, False, mask)
# float result type or float op
if (
is_float_dtype(other)
or is_float(other)
or op_name in ["__rtruediv__", "__truediv__", "__rdiv__", "__div__"]
):
rs = s.astype("float")
expected = op(rs, other)
self._check_op_float(result, expected, mask, s, op_name, other)
# integer result type
else:
rs = pd.Series(s.values._data, name=s.name)
expected = op(rs, other)
self._check_op_integer(result, expected, mask, s, op_name, other)
def _check_op_float(self, result, expected, mask, s, op_name, other):
# check comparisons that are resulting in float dtypes
expected[mask] = np.nan
if "floordiv" in op_name:
# Series op sets 1//0 to np.inf, which IntegerArray does not do (yet)
mask2 = np.isinf(expected) & np.isnan(result)
expected[mask2] = np.nan
tm.assert_series_equal(result, expected)
def _check_op_integer(self, result, expected, mask, s, op_name, other):
        # check comparisons that result in integer dtypes
        # to compare properly, we convert the expected values to float,
        # mask to NaN and convert infs; if we have uints we process as
        # uints and then convert to float, since we ultimately want to
        # create an IntegerArray for comparison
fill_value = 0
# mod/rmod turn floating 0 into NaN while
# integer works as expected (no nan)
if op_name in ["__mod__", "__rmod__"]:
if is_scalar(other):
if other == 0:
expected[s.values == 0] = 0
else:
expected = expected.fillna(0)
else:
expected[
(s.values == 0).fillna(False)
& ((expected == 0).fillna(False) | expected.isna())
] = 0
try:
expected[
((expected == np.inf) | (expected == -np.inf)).fillna(False)
] = fill_value
original = expected
expected = expected.astype(s.dtype)
except ValueError:
expected = expected.astype(float)
expected[
((expected == np.inf) | (expected == -np.inf)).fillna(False)
] = fill_value
original = expected
expected = expected.astype(s.dtype)
expected[mask] = pd.NA
# assert that the expected astype is ok
# (skip for unsigned as they have wrap around)
if not s.dtype.is_unsigned_integer:
original = pd.Series(original)
# we need to fill with 0's to emulate what an astype('int') does
# (truncation) for certain ops
if op_name in ["__rtruediv__", "__rdiv__"]:
mask |= original.isna()
original = original.fillna(0).astype("int")
original = original.astype("float")
original[mask] = np.nan
tm.assert_series_equal(original, expected.astype("float"))
# assert our expected result
tm.assert_series_equal(result, expected)
def test_arith_integer_array(self, data, all_arithmetic_operators):
# we operate with a rhs of an integer array
op = all_arithmetic_operators
s = pd.Series(data)
rhs = pd.Series([1] * len(data), dtype=data.dtype)
rhs.iloc[-1] = np.nan
self._check_op(s, op, rhs)
def test_arith_series_with_scalar(self, data, all_arithmetic_operators):
# scalar
op = all_arithmetic_operators
s = pd.Series(data)
self._check_op(s, op, 1, exc=TypeError)
def test_arith_frame_with_scalar(self, data, all_arithmetic_operators):
# frame & scalar
op = all_arithmetic_operators
df = pd.DataFrame({"A": data})
self._check_op(df, op, 1, exc=TypeError)
def test_arith_series_with_array(self, data, all_arithmetic_operators):
# ndarray & other series
op = all_arithmetic_operators
s = pd.Series(data)
other = np.ones(len(s), dtype=s.dtype.type)
self._check_op(s, op, other, exc=TypeError)
def test_arith_coerce_scalar(self, data, all_arithmetic_operators):
op = all_arithmetic_operators
s = pd.Series(data)
other = 0.01
self._check_op(s, op, other)
@pytest.mark.parametrize("other", [1.0, np.array(1.0)])
def test_arithmetic_conversion(self, all_arithmetic_operators, other):
# if we have a float operand we should have a float result
# if that is equal to an integer
op = self.get_op_from_name(all_arithmetic_operators)
s = pd.Series([1, 2, 3], dtype="Int64")
result = op(s, other)
assert result.dtype is np.dtype("float")
def test_arith_len_mismatch(self, all_arithmetic_operators):
# operating with a list-like with non-matching length raises
op = self.get_op_from_name(all_arithmetic_operators)
other = np.array([1.0])
s = pd.Series([1, 2, 3], dtype="Int64")
with pytest.raises(ValueError, match="Lengths must match"):
op(s, other)
@pytest.mark.parametrize("other", [0, 0.5])
def test_arith_zero_dim_ndarray(self, other):
arr = integer_array([1, None, 2])
result = arr + np.array(other)
expected = arr + other
tm.assert_equal(result, expected)
def test_error(self, data, all_arithmetic_operators):
# invalid ops
op = all_arithmetic_operators
s = pd.Series(data)
ops = getattr(s, op)
opa = getattr(data, op)
# invalid scalars
msg = (
r"(:?can only perform ops with numeric values)"
r"|(:?IntegerArray cannot perform the operation mod)"
)
with pytest.raises(TypeError, match=msg):
ops("foo")
with pytest.raises(TypeError, match=msg):
ops(pd.Timestamp("20180101"))
# invalid array-likes
with pytest.raises(TypeError, match=msg):
ops(pd.Series("foo", index=s.index))
if op != "__rpow__":
# TODO(extension)
# rpow with a datetimelike coerces the integer array incorrectly
msg = (
"can only perform ops with numeric values|"
"cannot perform .* with this index type: DatetimeArray|"
"Addition/subtraction of integers and integer-arrays "
"with DatetimeArray is no longer supported. *"
)
with pytest.raises(TypeError, match=msg):
ops(pd.Series(pd.date_range("20180101", periods=len(s))))
# 2d
result = opa(pd.DataFrame({"A": s}))
assert result is NotImplemented
msg = r"can only perform ops with 1-d structures"
with pytest.raises(NotImplementedError, match=msg):
opa(np.arange(len(s)).reshape(-1, len(s)))
@pytest.mark.parametrize("zero, negative", [(0, False), (0.0, False), (-0.0, True)])
def test_divide_by_zero(self, zero, negative):
# https://github.com/pandas-dev/pandas/issues/27398
a = pd.array([0, 1, -1, None], dtype="Int64")
result = a / zero
expected = np.array([np.nan, np.inf, -np.inf, np.nan])
if negative:
expected *= -1
tm.assert_numpy_array_equal(result, expected)
def test_pow_scalar(self):
a = pd.array([-1, 0, 1, None, 2], dtype="Int64")
result = a ** 0
expected = pd.array([1, 1, 1, 1, 1], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = a ** 1
expected = pd.array([-1, 0, 1, None, 2], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = a ** pd.NA
expected = pd.array([None, None, 1, None, None], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = a ** np.nan
expected = np.array([np.nan, np.nan, 1, np.nan, np.nan], dtype="float64")
tm.assert_numpy_array_equal(result, expected)
# reversed
a = a[1:] # Can't raise integers to negative powers.
result = 0 ** a
expected = pd.array([1, 0, None, 0], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = 1 ** a
expected = pd.array([1, 1, 1, 1], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = pd.NA ** a
expected = pd.array([1, None, None, None], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = np.nan ** a
expected = np.array([1, np.nan, np.nan, np.nan], dtype="float64")
tm.assert_numpy_array_equal(result, expected)
def test_pow_array(self):
a = integer_array([0, 0, 0, 1, 1, 1, None, None, None])
b = integer_array([0, 1, None, 0, 1, None, 0, 1, None])
result = a ** b
expected = integer_array([1, 0, None, 1, 1, 1, 1, None, None])
tm.assert_extension_array_equal(result, expected)
def test_rpow_one_to_na(self):
# https://github.com/pandas-dev/pandas/issues/22022
# https://github.com/pandas-dev/pandas/issues/29997
arr = integer_array([np.nan, np.nan])
result = np.array([1.0, 2.0]) ** arr
expected = np.array([1.0, np.nan])
tm.assert_numpy_array_equal(result, expected)
class TestComparisonOps(BaseOpsUtil):
def _compare_other(self, data, op_name, other):
op = self.get_op_from_name(op_name)
# array
result = pd.Series(op(data, other))
expected = pd.Series(op(data._data, other), dtype="boolean")
# fill the nan locations
expected[data._mask] = pd.NA
tm.assert_series_equal(result, expected)
# series
s = pd.Series(data)
result = op(s, other)
expected = op(pd.Series(data._data), other)
# fill the nan locations
expected[data._mask] = pd.NA
expected = expected.astype("boolean")
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("other", [True, False, pd.NA, -1, 0, 1])
def test_scalar(self, other, all_compare_operators):
op = self.get_op_from_name(all_compare_operators)
a = pd.array([1, 0, None], dtype="Int64")
result = op(a, other)
if other is pd.NA:
expected = pd.array([None, None, None], dtype="boolean")
else:
values = op(a._data, other)
expected = pd.arrays.BooleanArray(values, a._mask, copy=True)
tm.assert_extension_array_equal(result, expected)
# ensure we haven't mutated anything inplace
result[0] = pd.NA
tm.assert_extension_array_equal(a, pd.array([1, 0, None], dtype="Int64"))
def test_array(self, all_compare_operators):
op = self.get_op_from_name(all_compare_operators)
a = pd.array([0, 1, 2, None, None, None], dtype="Int64")
b = pd.array([0, 1, None, 0, 1, None], dtype="Int64")
result = op(a, b)
values = op(a._data, b._data)
mask = a._mask | b._mask
expected = pd.arrays.BooleanArray(values, mask)
tm.assert_extension_array_equal(result, expected)
# ensure we haven't mutated anything inplace
result[0] = pd.NA
tm.assert_extension_array_equal(
a, pd.array([0, 1, 2, None, None, None], dtype="Int64")
)
tm.assert_extension_array_equal(
b, pd.array([0, 1, None, 0, 1, None], dtype="Int64")
)
def test_compare_with_booleanarray(self, all_compare_operators):
op = self.get_op_from_name(all_compare_operators)
a = pd.array([True, False, None] * 3, dtype="boolean")
b = pd.array([0] * 3 + [1] * 3 + [None] * 3, dtype="Int64")
other = | pd.array([False] * 3 + [True] * 3 + [None] * 3, dtype="boolean") | pandas.array |
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for datetime64 and datetime64tz dtypes
from datetime import (
datetime,
time,
timedelta,
)
from itertools import (
product,
starmap,
)
import operator
import warnings
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.conversion import localize_pydatetime
from pandas._libs.tslibs.offsets import shift_months
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import (
DateOffset,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import (
DatetimeArray,
TimedeltaArray,
)
from pandas.core.ops import roperator
from pandas.tests.arithmetic.common import (
assert_cannot_add,
assert_invalid_addsub_type,
assert_invalid_comparison,
get_upcast_box,
)
# ------------------------------------------------------------------
# Comparisons
class TestDatetime64ArrayLikeComparisons:
# Comparison tests for datetime64 vectors fully parametrized over
# DataFrame/Series/DatetimeIndex/DatetimeArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, tz_naive_fixture, box_with_array):
# Test comparison with zero-dimensional array is unboxed
tz = tz_naive_fixture
box = box_with_array
dti = date_range("20130101", periods=3, tz=tz)
other = np.array(dti.to_numpy()[0])
dtarr = tm.box_expected(dti, box)
xbox = get_upcast_box(dtarr, other, True)
result = dtarr <= other
expected = np.array([True, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
"foo",
-1,
99,
4.0,
object(),
timedelta(days=2),
# GH#19800, GH#19301 datetime.date comparison raises to
# match DatetimeIndex/Timestamp. This also matches the behavior
# of stdlib datetime.datetime
datetime(2001, 1, 1).date(),
# GH#19301 None and NaN are *not* cast to NaT for comparisons
None,
np.nan,
],
)
def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array):
# GH#22074, GH#15966
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
dtarr = tm.box_expected(rng, box_with_array)
assert_invalid_comparison(dtarr, other, box_with_array)
@pytest.mark.parametrize(
"other",
[
# GH#4968 invalid date/int comparisons
list(range(10)),
np.arange(10),
np.arange(10).astype(np.float32),
np.arange(10).astype(object),
pd.timedelta_range("1ns", periods=10).array,
np.array(pd.timedelta_range("1ns", periods=10)),
list(pd.timedelta_range("1ns", periods=10)),
pd.timedelta_range("1 Day", periods=10).astype(object),
pd.period_range("1971-01-01", freq="D", periods=10).array,
pd.period_range("1971-01-01", freq="D", periods=10).astype(object),
],
)
def test_dt64arr_cmp_arraylike_invalid(
self, other, tz_naive_fixture, box_with_array
):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="ns", periods=10, tz=tz)._data
obj = tm.box_expected(dta, box_with_array)
assert_invalid_comparison(obj, other, box_with_array)
def test_dt64arr_cmp_mixed_invalid(self, tz_naive_fixture):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="h", periods=5, tz=tz)._data
other = np.array([0, 1, 2, dta[3], Timedelta(days=1)])
result = dta == other
expected = np.array([False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = dta != other
tm.assert_numpy_array_equal(result, ~expected)
msg = "Invalid comparison between|Cannot compare type|not supported between"
with pytest.raises(TypeError, match=msg):
dta < other
with pytest.raises(TypeError, match=msg):
dta > other
with pytest.raises(TypeError, match=msg):
dta <= other
with pytest.raises(TypeError, match=msg):
dta >= other
def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array):
# GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly
tz = tz_naive_fixture
box = box_with_array
ts = Timestamp("2021-01-01", tz=tz)
ser = Series([ts, NaT])
obj = tm.box_expected(ser, box)
xbox = get_upcast_box(obj, ts, True)
expected = Series([True, False], dtype=np.bool_)
expected = tm.box_expected(expected, xbox)
result = obj == ts
tm.assert_equal(result, expected)
class TestDatetime64SeriesComparison:
# TODO: moved from tests.series.test_operators; needs cleanup
@pytest.mark.parametrize(
"pair",
[
(
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[NaT, NaT, Timestamp("2011-01-03")],
),
(
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[NaT, NaT, Timedelta("3 days")],
),
(
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
[NaT, NaT, Period("2011-03", freq="M")],
),
],
)
@pytest.mark.parametrize("reverse", [True, False])
@pytest.mark.parametrize("dtype", [None, object])
@pytest.mark.parametrize(
"op, expected",
[
(operator.eq, Series([False, False, True])),
(operator.ne, Series([True, True, False])),
(operator.lt, Series([False, False, False])),
(operator.gt, Series([False, False, False])),
(operator.ge, Series([False, False, True])),
(operator.le, Series([False, False, True])),
],
)
def test_nat_comparisons(
self,
dtype,
index_or_series,
reverse,
pair,
op,
expected,
):
box = index_or_series
l, r = pair
if reverse:
# add lhs / rhs switched data
l, r = r, l
left = Series(l, dtype=dtype)
right = box(r, dtype=dtype)
result = op(left, right)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data",
[
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
],
)
@pytest.mark.parametrize("dtype", [None, object])
def test_nat_comparisons_scalar(self, dtype, data, box_with_array):
box = box_with_array
left = Series(data, dtype=dtype)
left = tm.box_expected(left, box)
xbox = get_upcast_box(left, NaT, True)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left == NaT, expected)
tm.assert_equal(NaT == left, expected)
expected = [True, True, True]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left != NaT, expected)
tm.assert_equal(NaT != left, expected)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left < NaT, expected)
tm.assert_equal(NaT > left, expected)
tm.assert_equal(left <= NaT, expected)
tm.assert_equal(NaT >= left, expected)
tm.assert_equal(left > NaT, expected)
tm.assert_equal(NaT < left, expected)
tm.assert_equal(left >= NaT, expected)
tm.assert_equal(NaT <= left, expected)
@pytest.mark.parametrize("val", [datetime(2000, 1, 4), datetime(2000, 1, 5)])
def test_series_comparison_scalars(self, val):
series = Series(date_range("1/1/2000", periods=10))
result = series > val
expected = Series([x > val for x in series])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"left,right", [("lt", "gt"), ("le", "ge"), ("eq", "eq"), ("ne", "ne")]
)
def test_timestamp_compare_series(self, left, right):
# see gh-4982
# Make sure we can compare Timestamps on the right AND left hand side.
ser = Series(date_range("20010101", periods=10), name="dates")
s_nat = ser.copy(deep=True)
ser[0] = Timestamp("nat")
ser[3] = Timestamp("nat")
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# No NaT
expected = left_f(ser, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), ser)
tm.assert_series_equal(result, expected)
# NaT
expected = left_f(ser, Timestamp("nat"))
result = right_f(Timestamp("nat"), ser)
tm.assert_series_equal(result, expected)
# Compare to Timestamp with series containing NaT
expected = left_f(s_nat, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), s_nat)
tm.assert_series_equal(result, expected)
# Compare to NaT with series containing NaT
expected = left_f(s_nat, NaT)
result = right_f(NaT, s_nat)
tm.assert_series_equal(result, expected)
def test_dt64arr_timestamp_equality(self, box_with_array):
# GH#11034
ser = Series([Timestamp("2000-01-29 01:59:00"), Timestamp("2000-01-30"), NaT])
ser = tm.box_expected(ser, box_with_array)
xbox = get_upcast_box(ser, ser, True)
result = ser != ser
expected = tm.box_expected([False, False, True], xbox)
tm.assert_equal(result, expected)
warn = FutureWarning if box_with_array is pd.DataFrame else None
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[0]
expected = tm.box_expected([False, True, True], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[2]
expected = tm.box_expected([True, True, True], xbox)
tm.assert_equal(result, expected)
result = ser == ser
expected = tm.box_expected([True, True, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[0]
expected = tm.box_expected([True, False, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[2]
expected = tm.box_expected([False, False, False], xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"datetimelike",
[
Timestamp("20130101"),
datetime(2013, 1, 1),
np.datetime64("2013-01-01T00:00", "ns"),
],
)
@pytest.mark.parametrize(
"op,expected",
[
(operator.lt, [True, False, False, False]),
(operator.le, [True, True, False, False]),
(operator.eq, [False, True, False, False]),
(operator.gt, [False, False, False, True]),
],
)
def test_dt64_compare_datetime_scalar(self, datetimelike, op, expected):
# GH#17965, test for ability to compare datetime64[ns] columns
# to datetimelike
ser = Series(
[
Timestamp("20120101"),
Timestamp("20130101"),
np.nan,
Timestamp("20130103"),
],
name="A",
)
result = op(ser, datetimelike)
expected = Series(expected, name="A")
tm.assert_series_equal(result, expected)
class TestDatetimeIndexComparisons:
# TODO: moved from tests.indexes.test_base; parametrize and de-duplicate
def test_comparators(self, comparison_op):
index = tm.makeDateIndex(100)
element = index[len(index) // 2]
element = Timestamp(element).to_datetime64()
arr = np.array(index)
arr_result = comparison_op(arr, element)
index_result = comparison_op(index, element)
assert isinstance(index_result, np.ndarray)
tm.assert_numpy_array_equal(arr_result, index_result)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
def test_dti_cmp_datetimelike(self, other, tz_naive_fixture):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
if tz is not None:
if isinstance(other, np.datetime64):
# no tzaware version available
return
other = localize_pydatetime(other, dti.tzinfo)
result = dti == other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = dti > other
expected = np.array([False, True])
tm.assert_numpy_array_equal(result, expected)
result = dti >= other
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
result = dti < other
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
result = dti <= other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", [None, object])
def test_dti_cmp_nat(self, dtype, box_with_array):
left = DatetimeIndex([Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")])
right = DatetimeIndex([NaT, NaT, Timestamp("2011-01-03")])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
xbox = get_upcast_box(left, right, True)
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = lhs != rhs
expected = np.array([True, True, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs == NaT, expected)
tm.assert_equal(NaT == rhs, expected)
expected = np.array([True, True, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs != NaT, expected)
tm.assert_equal(NaT != lhs, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs < NaT, expected)
tm.assert_equal(NaT > lhs, expected)
def test_dti_cmp_nat_behaves_like_float_cmp_nan(self):
fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])
fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])
didx1 = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
didx2 = DatetimeIndex(
["2014-02-01", "2014-03-01", NaT, NaT, "2014-06-01", "2014-07-01"]
)
darr = np.array(
[
np.datetime64("2014-02-01 00:00"),
np.datetime64("2014-03-01 00:00"),
np.datetime64("nat"),
np.datetime64("nat"),
np.datetime64("2014-06-01 00:00"),
np.datetime64("2014-07-01 00:00"),
]
)
cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]
        # Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, np.nan), (didx1, NaT)]:
result = idx1 < val
expected = np.array([False, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, True, True, True, True])
tm.assert_numpy_array_equal(result, expected)
        # Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]:
result = idx1 < val
expected = np.array([True, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
expected = np.array([False, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
expected = np.array([False, False, True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
expected = np.array([False, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, False, True, True, True])
tm.assert_numpy_array_equal(result, expected)
def test_comparison_tzawareness_compat(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
box = box_with_array
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box)
dz = tm.box_expected(dz, box)
if box is pd.DataFrame:
tolist = lambda x: x.astype(object).values.tolist()[0]
else:
tolist = list
if op not in [operator.eq, operator.ne]:
msg = (
r"Invalid comparison between dtype=datetime64\[ns.*\] "
"and (Timestamp|DatetimeArray|list|ndarray)"
)
with pytest.raises(TypeError, match=msg):
op(dr, dz)
with pytest.raises(TypeError, match=msg):
op(dr, tolist(dz))
with pytest.raises(TypeError, match=msg):
op(dr, np.array(tolist(dz), dtype=object))
with pytest.raises(TypeError, match=msg):
op(dz, dr)
with pytest.raises(TypeError, match=msg):
op(dz, tolist(dr))
with pytest.raises(TypeError, match=msg):
op(dz, np.array(tolist(dr), dtype=object))
# The aware==aware and naive==naive comparisons should *not* raise
assert np.all(dr == dr)
assert np.all(dr == tolist(dr))
assert np.all(tolist(dr) == dr)
assert np.all(np.array(tolist(dr), dtype=object) == dr)
assert np.all(dr == np.array(tolist(dr), dtype=object))
assert np.all(dz == dz)
assert np.all(dz == tolist(dz))
assert np.all(tolist(dz) == dz)
assert np.all(np.array(tolist(dz), dtype=object) == dz)
assert np.all(dz == np.array(tolist(dz), dtype=object))
def test_comparison_tzawareness_compat_scalars(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box_with_array)
dz = tm.box_expected(dz, box_with_array)
# Check comparisons against scalar Timestamps
ts = Timestamp("2000-03-14 01:59")
ts_tz = Timestamp("2000-03-14 01:59", tz="Europe/Amsterdam")
assert np.all(dr > ts)
msg = r"Invalid comparison between dtype=datetime64\[ns.*\] and Timestamp"
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dr, ts_tz)
assert np.all(dz > ts_tz)
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dz, ts)
if op not in [operator.eq, operator.ne]:
# GH#12601: Check comparison against Timestamps and DatetimeIndex
with pytest.raises(TypeError, match=msg):
op(ts, dz)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
# Bug in NumPy? https://github.com/numpy/numpy/issues/13841
    # Raising in __eq__ will fall back to NumPy, which warns, fails,
# then re-raises the original exception. So we just need to ignore.
@pytest.mark.filterwarnings("ignore:elementwise comp:DeprecationWarning")
@pytest.mark.filterwarnings("ignore:Converting timezone-aware:FutureWarning")
def test_scalar_comparison_tzawareness(
self, comparison_op, other, tz_aware_fixture, box_with_array
):
op = comparison_op
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
xbox = get_upcast_box(dtarr, other, True)
if op in [operator.eq, operator.ne]:
exbool = op is operator.ne
expected = np.array([exbool, exbool], dtype=bool)
expected = tm.box_expected(expected, xbox)
result = op(dtarr, other)
tm.assert_equal(result, expected)
result = op(other, dtarr)
tm.assert_equal(result, expected)
else:
msg = (
r"Invalid comparison between dtype=datetime64\[ns, .*\] "
f"and {type(other).__name__}"
)
with pytest.raises(TypeError, match=msg):
op(dtarr, other)
with pytest.raises(TypeError, match=msg):
op(other, dtarr)
def test_nat_comparison_tzawareness(self, comparison_op):
# GH#19276
# tzaware DatetimeIndex should not raise when compared to NaT
op = comparison_op
dti = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
expected = np.array([op == operator.ne] * len(dti))
result = op(dti, NaT)
tm.assert_numpy_array_equal(result, expected)
result = op(dti.tz_localize("US/Pacific"), NaT)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_str(self, tz_naive_fixture):
# GH#22074
# regardless of tz, we expect these comparisons are valid
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
other = "1/1/2000"
result = rng == other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng != other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng < other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = rng <= other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng > other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng >= other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_list(self):
rng = date_range("1/1/2000", periods=10)
result = rng == list(rng)
expected = rng == rng
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
pd.timedelta_range("1D", periods=10),
pd.timedelta_range("1D", periods=10).to_series(),
pd.timedelta_range("1D", periods=10).asi8.view("m8[ns]"),
],
ids=lambda x: type(x).__name__,
)
def test_dti_cmp_tdi_tzawareness(self, other):
# GH#22074
        # regression test that we _don't_ call _assert_tzawareness_compat
# when comparing against TimedeltaIndex
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
result = dti == other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = dti != other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
msg = "Invalid comparison between"
with pytest.raises(TypeError, match=msg):
dti < other
with pytest.raises(TypeError, match=msg):
dti <= other
with pytest.raises(TypeError, match=msg):
dti > other
with pytest.raises(TypeError, match=msg):
dti >= other
def test_dti_cmp_object_dtype(self):
# GH#22074
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
other = dti.astype("O")
result = dti == other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
other = dti.tz_localize(None)
result = dti != other
tm.assert_numpy_array_equal(result, expected)
other = np.array(list(dti[:5]) + [Timedelta(days=1)] * 5)
result = dti == other
expected = np.array([True] * 5 + [False] * 5)
tm.assert_numpy_array_equal(result, expected)
msg = ">=' not supported between instances of 'Timestamp' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
dti >= other
# ------------------------------------------------------------------
# Arithmetic
class TestDatetime64Arithmetic:
# This class is intended for "finished" tests that are fully parametrized
# over DataFrame/Series/Index/DatetimeArray
# -------------------------------------------------------------
# Addition/Subtraction of timedelta-like
@pytest.mark.arm_slow
def test_dt64arr_add_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
# GH#22005, GH#22163 check DataFrame doesn't raise TypeError
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("2000-01-01 02:00", "2000-02-01 02:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng + two_hours
tm.assert_equal(result, expected)
rng += two_hours
tm.assert_equal(rng, expected)
def test_dt64arr_sub_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("1999-12-31 22:00", "2000-01-31 22:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng - two_hours
tm.assert_equal(result, expected)
rng -= two_hours
tm.assert_equal(rng, expected)
# TODO: redundant with test_dt64arr_add_timedeltalike_scalar
def test_dt64arr_add_td64_scalar(self, box_with_array):
# scalar timedeltas/np.timedelta64 objects
# operate with np.timedelta64 correctly
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:01"), Timestamp("20130101 9:02:01")]
)
dtarr = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(1, "s")
tm.assert_equal(result, expected)
result = np.timedelta64(1, "s") + dtarr
tm.assert_equal(result, expected)
expected = Series(
[Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(5, "ms")
tm.assert_equal(result, expected)
result = np.timedelta64(5, "ms") + dtarr
tm.assert_equal(result, expected)
def test_dt64arr_add_sub_td64_nat(self, box_with_array, tz_naive_fixture):
# GH#23320 special handling for timedelta64("NaT")
tz = tz_naive_fixture
dti = date_range("1994-04-01", periods=9, tz=tz, freq="QS")
other = np.timedelta64("NaT")
expected = DatetimeIndex(["NaT"] * 9, tz=tz)
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
msg = "cannot subtract"
with pytest.raises(TypeError, match=msg):
other - obj
def test_dt64arr_add_sub_td64ndarray(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
tdi = TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
expected = date_range("2015-12-31", "2016-01-02", periods=3, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + tdarr
tm.assert_equal(result, expected)
result = tdarr + dtarr
tm.assert_equal(result, expected)
expected = date_range("2016-01-02", "2016-01-04", periods=3, tz=tz)
expected = tm.box_expected(expected, box_with_array)
result = dtarr - tdarr
tm.assert_equal(result, expected)
msg = "cannot subtract|(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
tdarr - dtarr
# -----------------------------------------------------------------
# Subtraction of datetime-like scalars
@pytest.mark.parametrize(
"ts",
[
Timestamp("2013-01-01"),
Timestamp("2013-01-01").to_pydatetime(),
Timestamp("2013-01-01").to_datetime64(),
],
)
def test_dt64arr_sub_dtscalar(self, box_with_array, ts):
# GH#8554, GH#22163 DataFrame op should _not_ return dt64 dtype
idx = date_range("2013-01-01", periods=3)._with_freq(None)
idx = tm.box_expected(idx, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = idx - ts
tm.assert_equal(result, expected)
def test_dt64arr_sub_datetime64_not_ns(self, box_with_array):
# GH#7996, GH#22163 ensure non-nano datetime64 is converted to nano
# for DataFrame operation
dt64 = np.datetime64("2013-01-01")
assert dt64.dtype == "datetime64[D]"
dti = date_range("20130101", periods=3)._with_freq(None)
dtarr = tm.box_expected(dti, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = dtarr - dt64
tm.assert_equal(result, expected)
result = dt64 - dtarr
tm.assert_equal(result, -expected)
def test_dt64arr_sub_timestamp(self, box_with_array):
ser = date_range("2014-03-17", periods=2, freq="D", tz="US/Eastern")
ser = ser._with_freq(None)
ts = ser[0]
ser = tm.box_expected(ser, box_with_array)
delta_series = Series([np.timedelta64(0, "D"), np.timedelta64(1, "D")])
expected = tm.box_expected(delta_series, box_with_array)
tm.assert_equal(ser - ts, expected)
tm.assert_equal(ts - ser, -expected)
def test_dt64arr_sub_NaT(self, box_with_array):
# GH#18808
dti = DatetimeIndex([NaT, Timestamp("19900315")])
ser = tm.box_expected(dti, box_with_array)
result = ser - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
dti_tz = dti.tz_localize("Asia/Tokyo")
ser_tz = tm.box_expected(dti_tz, box_with_array)
result = ser_tz - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
# -------------------------------------------------------------
# Subtraction of datetime-like array-like
def test_dt64arr_sub_dt64object_array(self, box_with_array, tz_naive_fixture):
dti = date_range("2016-01-01", periods=3, tz=tz_naive_fixture)
expected = dti - dti
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
with tm.assert_produces_warning(PerformanceWarning):
result = obj - obj.astype(object)
tm.assert_equal(result, expected)
def test_dt64arr_naive_sub_dt64ndarray(self, box_with_array):
dti = date_range("2016-01-01", periods=3, tz=None)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
expected = dtarr - dtarr
result = dtarr - dt64vals
tm.assert_equal(result, expected)
result = dt64vals - dtarr
tm.assert_equal(result, expected)
def test_dt64arr_aware_sub_dt64ndarray_raises(
self, tz_aware_fixture, box_with_array
):
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
msg = "subtraction must have the same timezones or"
with pytest.raises(TypeError, match=msg):
dtarr - dt64vals
with pytest.raises(TypeError, match=msg):
dt64vals - dtarr
# -------------------------------------------------------------
# Addition of datetime-like others (invalid)
def test_dt64arr_add_dt64ndarray_raises(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
assert_cannot_add(dtarr, dt64vals)
def test_dt64arr_add_timestamp_raises(self, box_with_array):
# GH#22163 ensure DataFrame doesn't cast Timestamp to i8
idx = DatetimeIndex(["2011-01-01", "2011-01-02"])
ts = idx[0]
idx = tm.box_expected(idx, box_with_array)
assert_cannot_add(idx, ts)
# -------------------------------------------------------------
# Other Invalid Addition/Subtraction
@pytest.mark.parametrize(
"other",
[
3.14,
np.array([2.0, 3.0]),
# GH#13078 datetime +/- Period is invalid
Period("2011-01-01", freq="D"),
# https://github.com/pandas-dev/pandas/issues/10329
time(1, 2, 3),
],
)
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_invalid(self, dti_freq, other, box_with_array):
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
dtarr = tm.box_expected(dti, box_with_array)
msg = "|".join(
[
"unsupported operand type",
"cannot (add|subtract)",
"cannot use operands with types",
"ufunc '?(add|subtract)'? cannot use operands with types",
"Concatenation operation is not implemented for NumPy arrays",
]
)
assert_invalid_addsub_type(dtarr, other, msg)
@pytest.mark.parametrize("pi_freq", ["D", "W", "Q", "H"])
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_parr(
self, dti_freq, pi_freq, box_with_array, box_with_array2
):
# GH#20049 subtracting PeriodIndex should raise TypeError
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
pi = dti.to_period(pi_freq)
dtarr = tm.box_expected(dti, box_with_array)
parr = tm.box_expected(pi, box_with_array2)
msg = "|".join(
[
"cannot (add|subtract)",
"unsupported operand",
"descriptor.*requires",
"ufunc.*cannot use operands",
]
)
| assert_invalid_addsub_type(dtarr, parr, msg) | pandas.tests.arithmetic.common.assert_invalid_addsub_type |
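# Illustrative sketch (not part of the test suite above): the rule these tests
# pin down, reproduced with plain pandas. Mixing tz-naive and tz-aware values
# in == / != yields element-wise False / True instead of raising, while ordered
# comparisons (<, <=, >, >=) raise TypeError. Exact messages vary by version.
import pandas as pd

_aware = pd.date_range("2016-01-01", periods=2, tz="US/Pacific")
_naive_ts = pd.Timestamp("2016-01-01")            # tz-naive scalar

print((_aware == _naive_ts).tolist())             # [False, False]
print((_aware != _naive_ts).tolist())             # [True, True]
try:
    _aware < _naive_ts                            # ordered mixed comparison is invalid
except TypeError as exc:
    print("raised:", exc)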
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_wine
from pdb import set_trace as breakpoint
# from IPython.display import display
def enlarge(n):
'''
    This function will multiply the input by 100
'''
return n * 100
class MyDataSplitter():
'''
This class implements a 3-way data split and outputs summary metrics.
'''
def __init__(self, df):
self.df = df
def train_validation_test_split(self, features, target,
train_size=0.7, val_size=0.1,
test_size=0.2, random_state=None,
shuffle=True):
'''
This function is a utility wrapper around the Scikit-Learn train_test_split that splits arrays or
matrices into train, validation, and test subsets.
Args:
            features (NumPy array or DataFrame): the feature matrix to split.
            target (NumPy array or pandas Series): the target values to split.
train_size (float or int): Proportion of the dataset to include in the train split (0 to 1).
val_size (float or int): Proportion of the dataset to include in the validation split (0 to 1).
test_size (float or int): Proportion of the dataset to include in the test split (0 to 1).
random_state (int): Controls the shuffling applied to the data before applying the split for reproducibility.
shuffle (bool): Whether or not to shuffle the data before splitting
Returns:
Train, test, and validation dataframes for features (X) and target (y).
'''
        X_train_val, X_test, y_train_val, y_test = train_test_split(
            features, target, test_size=test_size, random_state=random_state, shuffle=shuffle)
X_train, X_val, y_train, y_val = train_test_split(
X_train_val, y_train_val, test_size=val_size / (train_size + val_size),
random_state=random_state, shuffle=shuffle)
return X_train, X_val, X_test, y_train, y_val, y_test
def date_divider(self, date_col):
'''
        Splits the date column of the dataframe stored on this instance (self.df).
        Param date_col: string name of the date column to look up in self.df.
        Return: a copy of the dataframe with new Year, Month, and Day columns appended.
'''
converted_df = self.df.copy()
converted_df["Year"] = | pd.DatetimeIndex(converted_df[date_col]) | pandas.DatetimeIndex |
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__status__ = 'Development'
import numpy as np
import pandas as pd
# read input data
def read_data(path):
data = pd.read_csv(path)
data['date'] = pd.to_datetime(data['date'])
return data
# calculate active storage per column as (initial state - current state) * 100 / initial state,
# plus the total active storage (AS) and the time to equilibrium (TtE) for each column
def calc_active_storage(data):
df_AS = pd.DataFrame()
df_AS['date'] = data['date']
AS = TtE = np.empty([0])
for i in range(1, data.shape[1]):
df_AS[data.columns[i]] = (data.iloc[0, i] - data.iloc[:, i]) * 100 / data.iloc[0, i]
AS = np.append(AS, df_AS.iloc[:, i].loc[~df_AS.iloc[:, i].isnull()].iloc[-1])
TtE = np.append(TtE, df_AS.iloc[:, i].loc[~df_AS.iloc[:, i].isnull()].shape[0]-1)
return [df_AS, AS, TtE]
# extract features
def extr_features(df_AS, AS, TtE):
df_Feature = | pd.DataFrame() | pandas.DataFrame |
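# Hedged worked example for calc_active_storage above. The column name 'res_A'
# and its values are made up for illustration; the function expects a 'date'
# column followed by one storage column per reservoir/site.
_sample = pd.DataFrame({
    'date': pd.date_range('2020-01-01', periods=3, freq='D'),
    'res_A': [100.0, 90.0, 80.0],                 # storage falls from 100 to 80
})
_df_AS, _AS, _TtE = calc_active_storage(_sample)
# _df_AS['res_A'] -> [0.0, 10.0, 20.0]   (percent of initial state released)
# _AS  -> array([20.])                   (total active storage, % of initial)
# _TtE -> array([2.])                    (time steps until the last observation)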
# -*- coding: utf-8 -*-
import unittest
import platform
import pandas as pd
import numpy as np
import pyarrow.parquet as pq
import hpat
from hpat.tests.test_utils import (
count_array_REPs, count_parfor_REPs, count_array_OneDs, get_start_end)
from hpat.tests.gen_test_data import ParquetGenerator
from numba import types
from numba.config import IS_32BITS
from numba.errors import TypingError
_cov_corr_series = [(pd.Series(x), pd.Series(y)) for x, y in [
(
[np.nan, -2., 3., 9.1],
[np.nan, -2., 3., 5.0],
),
# TODO(quasilyte): more intricate data for complex-typed series.
# Some arguments make assert_almost_equal fail.
    # Functions that yield mismatching results:
# _column_corr_impl and _column_cov_impl.
(
[complex(-2., 1.0), complex(3.0, 1.0)],
[complex(-3., 1.0), complex(2.0, 1.0)],
),
(
[complex(-2.0, 1.0), complex(3.0, 1.0)],
[1.0, -2.0],
),
(
[1.0, -4.5],
[complex(-4.5, 1.0), complex(3.0, 1.0)],
),
]]
min_float64 = np.finfo('float64').min
max_float64 = np.finfo('float64').max
test_global_input_data_float64 = [
[1., np.nan, -1., 0., min_float64, max_float64],
[np.nan, np.inf, np.NINF, np.NZERO]
]
min_int64 = np.iinfo('int64').min
max_int64 = np.iinfo('int64').max
max_uint64 = np.iinfo('uint64').max
test_global_input_data_integer64 = [
[1, -1, 0],
[min_int64, max_int64],
[max_uint64]
]
test_global_input_data_numeric = test_global_input_data_integer64 + test_global_input_data_float64
test_global_input_data_unicode_kind4 = [
'ascii',
'12345',
'1234567890',
'¡Y tú quién te crees?',
'🐍⚡',
'大处着眼,小处着手。',
]
test_global_input_data_unicode_kind1 = [
'ascii',
'12345',
'1234567890',
]
def _make_func_from_text(func_text, func_name='test_impl'):
loc_vars = {}
exec(func_text, {}, loc_vars)
test_impl = loc_vars[func_name]
return test_impl
def _make_func_use_binop1(operator):
func_text = "def test_impl(A, B):\n"
func_text += " return A {} B\n".format(operator)
return _make_func_from_text(func_text)
def _make_func_use_binop2(operator):
func_text = "def test_impl(A, B):\n"
func_text += " A {} B\n".format(operator)
func_text += " return A\n"
return _make_func_from_text(func_text)
def _make_func_use_method_arg1(method):
func_text = "def test_impl(A, B):\n"
func_text += " return A.{}(B)\n".format(method)
return _make_func_from_text(func_text)
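# Illustrative sketch (not part of the test class below): what the three helper
# factories above produce. Each one exec's a small generated source string and
# returns a plain Python function that the tests then wrap with hpat.jit.
_add_impl = _make_func_use_binop1('+')            # equivalent to: lambda A, B: A + B
assert _add_impl(2, 3) == 5
_iadd_impl = _make_func_use_binop2('+')           # in-place form: A += B; return A
assert _iadd_impl(2, 3) == 5
_eq_impl = _make_func_use_method_arg1('eq')       # method form, e.g. Series.eq(other)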
GLOBAL_VAL = 2
class TestSeries(unittest.TestCase):
def test_create1(self):
def test_impl():
df = pd.DataFrame({'A': [1, 2, 3]})
return (df.A == 1).sum()
hpat_func = hpat.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
@unittest.skip('Feature request: implement Series::ctor with list(list(type))')
def test_create_list_list_unicode(self):
def test_impl():
S = pd.Series([
['abc', 'defg', 'ijk'],
['lmn', 'opq', 'rstuvwxyz']
])
return S
hpat_func = hpat.jit(test_impl)
result_ref = test_impl()
result = hpat_func()
pd.testing.assert_series_equal(result, result_ref)
@unittest.skip('Feature request: implement Series::ctor with list(list(type))')
def test_create_list_list_integer(self):
def test_impl():
S = pd.Series([
[123, 456, -789],
[-112233, 445566, 778899]
])
return S
hpat_func = hpat.jit(test_impl)
result_ref = test_impl()
result = hpat_func()
pd.testing.assert_series_equal(result, result_ref)
@unittest.skip('Feature request: implement Series::ctor with list(list(type))')
def test_create_list_list_float(self):
def test_impl():
S = pd.Series([
[1.23, -4.56, 7.89],
[11.2233, 44.5566, -778.899]
])
return S
hpat_func = hpat.jit(test_impl)
result_ref = test_impl()
result = hpat_func()
pd.testing.assert_series_equal(result, result_ref)
def test_create2(self):
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n)})
return (df.A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
def test_create_series1(self):
def test_impl():
A = pd.Series([1, 2, 3])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_create_series_index1(self):
# create and box an indexed Series
def test_impl():
A = pd.Series([1, 2, 3], ['A', 'C', 'B'])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_create_series_index2(self):
def test_impl():
A = pd.Series([1, 2, 3], index=['A', 'C', 'B'])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_create_series_index3(self):
def test_impl():
A = pd.Series([1, 2, 3], index=['A', 'C', 'B'], name='A')
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_create_series_index4(self):
def test_impl(name):
A = pd.Series([1, 2, 3], index=['A', 'C', 'B'], name=name)
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func('A'), test_impl('A'))
def test_create_str(self):
def test_impl():
df = pd.DataFrame({'A': ['a', 'b', 'c']})
return (df.A == 'a').sum()
hpat_func = hpat.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
def test_pass_df1(self):
def test_impl(df):
return (df.A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df), test_impl(df))
def test_pass_df_str(self):
def test_impl(df):
return (df.A == 'a').sum()
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': ['a', 'b', 'c']})
self.assertEqual(hpat_func(df), test_impl(df))
def test_pass_series1(self):
# TODO: check to make sure it is series type
def test_impl(A):
return (A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_pass_series2(self):
# test creating dataframe from passed series
def test_impl(A):
df = pd.DataFrame({'A': A})
return (df.A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_pass_series_str(self):
def test_impl(A):
return (A == 'a').sum()
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': ['a', 'b', 'c']})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_pass_series_index1(self):
def test_impl(A):
return A
hpat_func = hpat.jit(test_impl)
S = pd.Series([3, 5, 6], ['a', 'b', 'c'], name='A')
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_size(self):
def test_impl(S):
return S.size
hpat_func = hpat.jit(test_impl)
n = 11
for S, expected in [
(pd.Series(), 0),
(pd.Series([]), 0),
(pd.Series(np.arange(n)), n),
(pd.Series([np.nan, 1, 2]), 3),
(pd.Series(['1', '2', '3']), 3),
]:
with self.subTest(S=S, expected=expected):
self.assertEqual(hpat_func(S), expected)
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_attr2(self):
def test_impl(A):
return A.copy().values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_attr3(self):
def test_impl(A):
return A.min()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_series_attr4(self):
def test_impl(A):
return A.cumsum().values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_argsort1(self):
def test_impl(A):
return A.argsort()
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.random.ranf(n))
pd.testing.assert_series_equal(hpat_func(A), test_impl(A))
def test_series_attr6(self):
def test_impl(A):
return A.take([2, 3]).values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_attr7(self):
def test_impl(A):
return A.astype(np.float64)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_getattr_ndim(self):
'''Verifies getting Series attribute ndim is supported'''
def test_impl(S):
return S.ndim
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_getattr_T(self):
'''Verifies getting Series attribute T is supported'''
def test_impl(S):
return S.T
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
def test_series_copy_str1(self):
def test_impl(A):
return A.copy()
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_copy_int1(self):
def test_impl(A):
return A.copy()
hpat_func = hpat.jit(test_impl)
S = pd.Series([1, 2, 3])
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
def test_series_copy_deep(self):
def test_impl(A, deep):
return A.copy(deep=deep)
hpat_func = hpat.jit(test_impl)
for S in [
pd.Series([1, 2]),
pd.Series([1, 2], index=["a", "b"]),
]:
with self.subTest(S=S):
for deep in (True, False):
with self.subTest(deep=deep):
actual = hpat_func(S, deep)
expected = test_impl(S, deep)
pd.testing.assert_series_equal(actual, expected)
self.assertEqual(actual.values is S.values, expected.values is S.values)
self.assertEqual(actual.values is S.values, not deep)
# Shallow copy of index is not supported yet
if deep:
self.assertEqual(actual.index is S.index, expected.index is S.index)
self.assertEqual(actual.index is S.index, not deep)
def test_series_astype_int_to_str1(self):
'''Verifies Series.astype implementation with function 'str' as argument
converts integer series to series of strings
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_int_to_str2(self):
'''Verifies Series.astype implementation with a string literal dtype argument
converts integer series to series of strings
'''
def test_impl(S):
return S.astype('str')
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_to_str1(self):
'''Verifies Series.astype implementation with function 'str' as argument
handles string series not changing it
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_to_str2(self):
'''Verifies Series.astype implementation with a string literal dtype argument
handles string series not changing it
'''
def test_impl(S):
return S.astype('str')
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_to_str_index_str(self):
'''Verifies Series.astype implementation with function 'str' as argument
handles string series not changing it
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'], index=['d', 'e', 'f'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_to_str_index_int(self):
'''Verifies Series.astype implementation with function 'str' as argument
handles string series not changing it
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'], index=[1, 2, 3])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('TODO: requires str(datetime64) support in Numba')
def test_series_astype_dt_to_str1(self):
'''Verifies Series.astype implementation with function 'str' as argument
converts datetime series to series of strings
'''
def test_impl(A):
return A.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series([pd.Timestamp('20130101 09:00:00'),
pd.Timestamp('20130101 09:00:02'),
pd.Timestamp('20130101 09:00:03')
])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('AssertionError: Series are different'
'[left]: [0.000000, 1.000000, 2.000000, 3.000000, ...'
'[right]: [0.0, 1.0, 2.0, 3.0, ...'
'TODO: needs alignment to NumPy on Numba side')
def test_series_astype_float_to_str1(self):
'''Verifies Series.astype implementation with function 'str' as argument
converts float series to series of strings
'''
def test_impl(A):
return A.astype(str)
hpat_func = hpat.jit(test_impl)
n = 11.0
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_int32_to_int64(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts series with dtype=int32 to series with dtype=int64
'''
def test_impl(A):
return A.astype(np.int64)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n), dtype=np.int32)
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_int_to_float64(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts integer series to series of float
'''
def test_impl(A):
return A.astype(np.float64)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_float_to_int32(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts float series to series of integers
'''
def test_impl(A):
return A.astype(np.int32)
hpat_func = hpat.jit(test_impl)
n = 11.0
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('TODO: needs Numba astype impl support string literal as dtype arg')
def test_series_astype_literal_dtype1(self):
'''Verifies Series.astype implementation with a string literal dtype argument
converts float series to series of integers
'''
def test_impl(A):
return A.astype('int32')
hpat_func = hpat.jit(test_impl)
n = 11.0
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('TODO: needs Numba astype impl support converting unicode_type to int')
def test_series_astype_str_to_int32(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts series of strings to series of integers
'''
import numba
def test_impl(A):
return A.astype(np.int32)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series([str(x) for x in np.arange(n) - n // 2])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('TODO: needs Numba astype impl support converting unicode_type to float')
def test_series_astype_str_to_float64(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts series of strings to series of float
'''
def test_impl(A):
return A.astype(np.float64)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['3.24', '1E+05', '-1', '-1.3E-01', 'nan', 'inf'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_index_str(self):
'''Verifies Series.astype implementation with function 'str' as argument
handles string series not changing it
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'], index=['a', 'b', 'c'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_index_int(self):
'''Verifies Series.astype implementation with function 'str' as argument
handles string series not changing it
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'], index=[2, 3, 5])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_np_call_on_series1(self):
def test_impl(A):
return np.min(A)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_values(self):
def test_impl(A):
return A.values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_values1(self):
def test_impl(A):
return (A == 2).values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_shape1(self):
def test_impl(A):
return A.shape
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_static_setitem_series1(self):
def test_impl(A):
A[0] = 2
return (A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_setitem_series1(self):
def test_impl(A, i):
A[i] = 2
return (A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A.copy(), 0), test_impl(df.A.copy(), 0))
def test_setitem_series2(self):
def test_impl(A, i):
A[i] = 100
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
A1 = df.A.copy()
A2 = df.A
hpat_func(A1, 0)
test_impl(A2, 0)
pd.testing.assert_series_equal(A1, A2)
@unittest.skip("enable after remove dead in hiframes is removed")
def test_setitem_series3(self):
def test_impl(A, i):
S = pd.Series(A)
S[i] = 100
hpat_func = hpat.jit(test_impl)
n = 11
A = np.arange(n)
A1 = A.copy()
A2 = A
hpat_func(A1, 0)
test_impl(A2, 0)
np.testing.assert_array_equal(A1, A2)
def test_setitem_series_bool1(self):
def test_impl(A):
A[A > 3] = 100
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
A1 = df.A.copy()
A2 = df.A
hpat_func(A1)
test_impl(A2)
pd.testing.assert_series_equal(A1, A2)
def test_setitem_series_bool2(self):
def test_impl(A, B):
A[A > 3] = B[A > 3]
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n), 'B': np.arange(n)**2})
A1 = df.A.copy()
A2 = df.A
hpat_func(A1, df.B)
test_impl(A2, df.B)
pd.testing.assert_series_equal(A1, A2)
def test_static_getitem_series1(self):
def test_impl(A):
return A[0]
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
self.assertEqual(hpat_func(A), test_impl(A))
def test_getitem_series1(self):
def test_impl(A, i):
return A[i]
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A, 0), test_impl(df.A, 0))
def test_getitem_series_str1(self):
def test_impl(A, i):
return A[i]
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': ['aa', 'bb', 'cc']})
self.assertEqual(hpat_func(df.A, 0), test_impl(df.A, 0))
def test_series_iat1(self):
def test_impl(A):
return A.iat[3]
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n)**2)
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_iat2(self):
def test_impl(A):
A.iat[3] = 1
return A
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_iloc1(self):
def test_impl(A):
return A.iloc[3]
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n)**2)
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_iloc2(self):
def test_impl(A):
return A.iloc[3:8]
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(
hpat_func(S), test_impl(S).reset_index(drop=True))
def test_series_op1(self):
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(1, n), 'B': np.ones(n - 1)})
pd.testing.assert_series_equal(hpat_func(df.A, df.B), test_impl(df.A, df.B), check_names=False)
def test_series_op2(self):
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = hpat.jit(test_impl)
n = 11
if platform.system() == 'Windows' and not IS_32BITS:
df = pd.DataFrame({'A': np.arange(1, n, dtype=np.int64)})
else:
df = pd.DataFrame({'A': np.arange(1, n)})
pd.testing.assert_series_equal(hpat_func(df.A, 1), test_impl(df.A, 1), check_names=False)
def test_series_op3(self):
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop2(operator)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(1, n), 'B': np.ones(n - 1)})
pd.testing.assert_series_equal(hpat_func(df.A, df.B), test_impl(df.A, df.B), check_names=False)
def test_series_op4(self):
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop2(operator)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(1, n)})
pd.testing.assert_series_equal(hpat_func(df.A, 1), test_impl(df.A, 1), check_names=False)
def test_series_op5(self):
arithmetic_methods = ('add', 'sub', 'mul', 'div', 'truediv', 'floordiv', 'mod', 'pow')
for method in arithmetic_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(1, n), 'B': np.ones(n - 1)})
pd.testing.assert_series_equal(hpat_func(df.A, df.B), test_impl(df.A, df.B), check_names=False)
@unittest.skipIf(platform.system() == 'Windows', 'Series values are different (20.0 %)'
'[left]: [1, 1024, 59049, 1048576, 9765625, 60466176, 282475249, 1073741824, 3486784401, 10000000000]'
'[right]: [1, 1024, 59049, 1048576, 9765625, 60466176, 282475249, 1073741824, -808182895, 1410065408]')
def test_series_op5_integer_scalar(self):
arithmetic_methods = ('add', 'sub', 'mul', 'div', 'truediv', 'floordiv', 'mod', 'pow')
for method in arithmetic_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
if platform.system() == 'Windows' and not IS_32BITS:
operand_series = pd.Series(np.arange(1, n, dtype=np.int64))
else:
operand_series = pd.Series(np.arange(1, n))
operand_scalar = 10
pd.testing.assert_series_equal(
hpat_func(operand_series, operand_scalar),
test_impl(operand_series, operand_scalar),
check_names=False)
def test_series_op5_float_scalar(self):
arithmetic_methods = ('add', 'sub', 'mul', 'div', 'truediv', 'floordiv', 'mod', 'pow')
for method in arithmetic_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
operand_series = pd.Series(np.arange(1, n))
operand_scalar = .5
pd.testing.assert_series_equal(
hpat_func(operand_series, operand_scalar),
test_impl(operand_series, operand_scalar),
check_names=False)
def test_series_op6(self):
def test_impl(A):
return -A
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(A), test_impl(A))
def test_series_op7(self):
comparison_binops = ('<', '>', '<=', '>=', '!=', '==')
for operator in comparison_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_names=False)
def test_series_op8(self):
comparison_methods = ('lt', 'gt', 'le', 'ge', 'ne', 'eq')
for method in comparison_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_names=False)
@unittest.skipIf(platform.system() == 'Windows', "Attribute dtype are different: int64, int32")
def test_series_op8_integer_scalar(self):
comparison_methods = ('lt', 'gt', 'le', 'ge', 'eq', 'ne')
for method in comparison_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
operand_series = pd.Series(np.arange(1, n))
operand_scalar = 10
pd.testing.assert_series_equal(
hpat_func(operand_series, operand_scalar),
test_impl(operand_series, operand_scalar),
check_names=False)
def test_series_op8_float_scalar(self):
comparison_methods = ('lt', 'gt', 'le', 'ge', 'eq', 'ne')
for method in comparison_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
operand_series = pd.Series(np.arange(1, n))
operand_scalar = .5
pd.testing.assert_series_equal(
hpat_func(operand_series, operand_scalar),
test_impl(operand_series, operand_scalar),
check_names=False)
def test_series_inplace_binop_array(self):
def test_impl(A, B):
A += B
return A
hpat_func = hpat.jit(test_impl)
n = 11
        A = np.arange(n)**2.0  # TODO: use 2 to test int casting
B = pd.Series(np.ones(n))
np.testing.assert_array_equal(hpat_func(A.copy(), B), test_impl(A, B))
def test_series_fusion1(self):
def test_impl(A, B):
return A + B + 1
hpat_func = hpat.jit(test_impl)
n = 11
if platform.system() == 'Windows' and not IS_32BITS:
A = pd.Series(np.arange(n), dtype=np.int64)
B = pd.Series(np.arange(n)**2, dtype=np.int64)
else:
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B))
self.assertEqual(count_parfor_REPs(), 1)
def test_series_fusion2(self):
# make sure getting data var avoids incorrect single def assumption
def test_impl(A, B):
S = B + 2
if A[0] == 0:
S = A + 1
return S + B
hpat_func = hpat.jit(test_impl)
n = 11
if platform.system() == 'Windows' and not IS_32BITS:
A = pd.Series(np.arange(n), dtype=np.int64)
B = pd.Series(np.arange(n)**2, dtype=np.int64)
else:
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B))
self.assertEqual(count_parfor_REPs(), 3)
def test_series_len(self):
def test_impl(A, i):
return len(A)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A, 0), test_impl(df.A, 0))
def test_series_box(self):
def test_impl():
A = pd.Series([1, 2, 3])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_series_box2(self):
def test_impl():
A = pd.Series(['1', '2', '3'])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_series_list_str_unbox1(self):
def test_impl(A):
return A.iloc[0]
hpat_func = hpat.jit(test_impl)
S = pd.Series([['aa', 'b'], ['ccc'], []])
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
# call twice to test potential refcount errors
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
def test_np_typ_call_replace(self):
# calltype replacement is tricky for np.typ() calls since variable
# type can't provide calltype
def test_impl(i):
return np.int32(i)
hpat_func = hpat.jit(test_impl)
self.assertEqual(hpat_func(1), test_impl(1))
def test_series_ufunc1(self):
def test_impl(A, i):
return np.isinf(A).values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A, 1), test_impl(df.A, 1))
def test_list_convert(self):
def test_impl():
df = pd.DataFrame({'one': np.array([-1, np.nan, 2.5]),
'two': ['foo', 'bar', 'baz'],
'three': [True, False, True]})
return df.one.values, df.two.values, df.three.values
hpat_func = hpat.jit(test_impl)
one, two, three = hpat_func()
self.assertTrue(isinstance(one, np.ndarray))
self.assertTrue(isinstance(two, np.ndarray))
self.assertTrue(isinstance(three, np.ndarray))
@unittest.skip("needs empty_like typing fix in npydecl.py")
def test_series_empty_like(self):
def test_impl(A):
return np.empty_like(A)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertTrue(isinstance(hpat_func(df.A), np.ndarray))
def test_series_fillna1(self):
def test_impl(A):
return A.fillna(5.0)
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': [1.0, 2.0, np.nan, 1.0]})
pd.testing.assert_series_equal(hpat_func(df.A),
test_impl(df.A), check_names=False)
# test inplace fillna for named numeric series (obtained from DataFrame)
def test_series_fillna_inplace1(self):
def test_impl(A):
A.fillna(5.0, inplace=True)
return A
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': [1.0, 2.0, np.nan, 1.0]})
pd.testing.assert_series_equal(hpat_func(df.A),
test_impl(df.A), check_names=False)
def test_series_fillna_str1(self):
def test_impl(A):
return A.fillna("dd")
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': ['aa', 'b', None, 'ccc']})
pd.testing.assert_series_equal(hpat_func(df.A),
test_impl(df.A), check_names=False)
def test_series_fillna_str_inplace1(self):
def test_impl(A):
A.fillna("dd", inplace=True)
return A
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'ccc'])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
# TODO: handle string array reflection
# hpat_func(S1)
# test_impl(S2)
# np.testing.assert_array_equal(S1, S2)
def test_series_fillna_str_inplace_empty1(self):
def test_impl(A):
A.fillna("", inplace=True)
return A
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'ccc'])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('Unsupported functionality: failed to handle index')
def test_series_fillna_index_str(self):
def test_impl(S):
return S.fillna(5.0)
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2.0, np.nan, 1.0], index=['a', 'b', 'c', 'd'])
pd.testing.assert_series_equal(hpat_func(S),
test_impl(S), check_names=False)
@unittest.skip('Unsupported functionality: failed to handle index')
def test_series_fillna_index_int(self):
def test_impl(S):
return S.fillna(5.0)
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2.0, np.nan, 1.0], index=[2, 3, 4, 5])
pd.testing.assert_series_equal(hpat_func(S),
test_impl(S), check_names=False)
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'No support of axis argument in old-style Series.dropna() impl')
def test_series_dropna_axis1(self):
'''Verifies Series.dropna() implementation handles 'index' as axis argument'''
def test_impl(S):
return S.dropna(axis='index')
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'No support of axis argument in old-style Series.dropna() impl')
def test_series_dropna_axis2(self):
'''Verifies Series.dropna() implementation handles 0 as axis argument'''
def test_impl(S):
return S.dropna(axis=0)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'No support of axis argument in old-style Series.dropna() impl')
def test_series_dropna_axis3(self):
'''Verifies Series.dropna() implementation handles correct non-literal axis argument'''
def test_impl(S, axis):
return S.dropna(axis=axis)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
axis_values = [0, 'index']
for value in axis_values:
pd.testing.assert_series_equal(hpat_func(S1, value), test_impl(S2, value))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_float_index1(self):
'''Verifies Series.dropna() implementation for float series with default index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
for data in test_global_input_data_float64:
S1 = pd.Series(data)
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_float_index2(self):
'''Verifies Series.dropna() implementation for float series with string index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf], ['a', 'b', 'c', 'd', 'e'])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_str_index1(self):
'''Verifies Series.dropna() implementation for series of strings with default index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_str_index2(self):
'''Verifies Series.dropna() implementation for series of strings with string index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''], ['a', 'b', 'c', 'd', 'e'])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_str_index3(self):
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''], index=[1, 2, 5, 7, 10])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('BUG: old-style dropna impl returns series without index, in new-style inplace is unsupported')
def test_series_dropna_float_inplace_no_index1(self):
'''Verifies Series.dropna() implementation for float series with default index and inplace argument True'''
def test_impl(S):
S.dropna(inplace=True)
return S
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('TODO: add reflection support and check method return value')
def test_series_dropna_float_inplace_no_index2(self):
'''Verifies Series.dropna(inplace=True) results are reflected back in the original float series'''
def test_impl(S):
return S.dropna(inplace=True)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
self.assertIsNone(hpat_func(S1))
self.assertIsNone(test_impl(S2))
pd.testing.assert_series_equal(S1, S2)
@unittest.skip('BUG: old-style dropna impl returns series without index, in new-style inplace is unsupported')
def test_series_dropna_str_inplace_no_index1(self):
'''Verifies Series.dropna() implementation for series of strings
with default index and inplace argument True
'''
def test_impl(S):
S.dropna(inplace=True)
return S
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('TODO: add reflection support and check method return value')
def test_series_dropna_str_inplace_no_index2(self):
'''Verifies Series.dropna(inplace=True) results are reflected back in the original string series'''
def test_impl(S):
return S.dropna(inplace=True)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''])
S2 = S1.copy()
self.assertIsNone(hpat_func(S1))
self.assertIsNone(test_impl(S2))
pd.testing.assert_series_equal(S1, S2)
def test_series_dropna_str_parallel1(self):
'''Verifies Series.dropna() distributed work for series of strings with default index'''
def test_impl(A):
B = A.dropna()
return (B == 'gg').sum()
hpat_func = hpat.jit(distributed=['A'])(test_impl)
S1 = pd.Series(['aa', 'b', None, 'ccc', 'dd', 'gg'])
start, end = get_start_end(len(S1))
# TODO: gatherv
self.assertEqual(hpat_func(S1[start:end]), test_impl(S1))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
self.assertTrue(count_array_OneDs() > 0)
@unittest.skip('AssertionError: Series are different\n'
'Series length are different\n'
'[left]: 3, Int64Index([0, 1, 2], dtype=\'int64\')\n'
'[right]: 2, Int64Index([1, 2], dtype=\'int64\')')
def test_series_dropna_dt_no_index1(self):
'''Verifies Series.dropna() implementation for datetime series with default index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([pd.NaT, pd.Timestamp('1970-12-01'), pd.Timestamp('2012-07-25')])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
def test_series_dropna_bool_no_index1(self):
'''Verifies Series.dropna() implementation for bool series with default index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([True, False, False, True])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_int_no_index1(self):
'''Verifies Series.dropna() implementation for integer series with default index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
n = 11
S1 = pd.Series(np.arange(n, dtype=np.int64))
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('numba.errors.TypingError - fix needed\n'
'Failed in hpat mode pipeline'
'(step: convert to distributed)\n'
'Invalid use of Function(<built-in function len>)'
'with argument(s) of type(s): (none)\n')
def test_series_rename1(self):
def test_impl(A):
return A.rename('B')
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': [1.0, 2.0, np.nan, 1.0]})
pd.testing.assert_series_equal(hpat_func(df.A), test_impl(df.A))
def test_series_sum_default(self):
def test_impl(S):
return S.sum()
hpat_func = hpat.jit(test_impl)
S = pd.Series([1., 2., 3.])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_sum_nan(self):
def test_impl(S):
return S.sum()
hpat_func = hpat.jit(test_impl)
# column with NA
S = pd.Series([np.nan, 2., 3.])
self.assertEqual(hpat_func(S), test_impl(S))
# all NA case should produce 0
S = pd.Series([np.nan, np.nan])
self.assertEqual(hpat_func(S), test_impl(S))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default, "Old style Series.sum() does not support parameters")
def test_series_sum_skipna_false(self):
def test_impl(S):
return S.sum(skipna=False)
hpat_func = hpat.jit(test_impl)
S = pd.Series([np.nan, 2., 3.])
self.assertEqual(np.isnan(hpat_func(S)), np.isnan(test_impl(S)))
@unittest.skipIf(not hpat.config.config_pipeline_hpat_default,
"Series.sum() operator + is not implemented yet for Numba")
def test_series_sum2(self):
def test_impl(S):
return (S + S).sum()
hpat_func = hpat.jit(test_impl)
S = pd.Series([np.nan, 2., 3.])
self.assertEqual(hpat_func(S), test_impl(S))
S = pd.Series([np.nan, np.nan])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_prod(self):
def test_impl(S, skipna):
return S.prod(skipna=skipna)
hpat_func = hpat.jit(test_impl)
data_samples = [
[6, 6, 2, 1, 3, 3, 2, 1, 2],
[1.1, 0.3, 2.1, 1, 3, 0.3, 2.1, 1.1, 2.2],
[6, 6.1, 2.2, 1, 3, 3, 2.2, 1, 2],
[6, 6, np.nan, 2, np.nan, 1, 3, 3, np.inf, 2, 1, 2, np.inf],
[1.1, 0.3, np.nan, 1.0, np.inf, 0.3, 2.1, np.nan, 2.2, np.inf],
[1.1, 0.3, np.nan, 1, np.inf, 0, 1.1, np.nan, 2.2, np.inf, 2, 2],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.inf],
]
for data in data_samples:
S = pd.Series(data)
for skipna_var in [True, False]:
actual = hpat_func(S, skipna=skipna_var)
expected = test_impl(S, skipna=skipna_var)
if np.isnan(actual) or np.isnan(expected):
                    # cannot compare NaN != NaN directly
self.assertEqual(np.isnan(actual), np.isnan(expected))
else:
self.assertEqual(actual, expected)
def test_series_prod_skipna_default(self):
def test_impl(S):
return S.prod()
hpat_func = hpat.jit(test_impl)
S = pd.Series([np.nan, 2, 3.])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_count1(self):
def test_impl(S):
return S.count()
hpat_func = hpat.jit(test_impl)
S = pd.Series([np.nan, 2., 3.])
self.assertEqual(hpat_func(S), test_impl(S))
S = pd.Series([np.nan, np.nan])
self.assertEqual(hpat_func(S), test_impl(S))
S = pd.Series(['aa', 'bb', np.nan])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_mean(self):
def test_impl(S):
return S.mean()
hpat_func = hpat.jit(test_impl)
data_samples = [
[6, 6, 2, 1, 3, 3, 2, 1, 2],
[1.1, 0.3, 2.1, 1, 3, 0.3, 2.1, 1.1, 2.2],
[6, 6.1, 2.2, 1, 3, 3, 2.2, 1, 2],
[6, 6, np.nan, 2, np.nan, 1, 3, 3, np.inf, 2, 1, 2, np.inf],
[1.1, 0.3, np.nan, 1.0, np.inf, 0.3, 2.1, np.nan, 2.2, np.inf],
[1.1, 0.3, np.nan, 1, np.inf, 0, 1.1, np.nan, 2.2, np.inf, 2, 2],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.inf],
]
for data in data_samples:
with self.subTest(data=data):
S = pd.Series(data)
actual = hpat_func(S)
expected = test_impl(S)
if np.isnan(actual) or np.isnan(expected):
self.assertEqual(np.isnan(actual), np.isnan(expected))
else:
self.assertEqual(actual, expected)
@unittest.skipIf(hpat.config.config_pipeline_hpat_default, "Series.mean() any parameters unsupported")
def test_series_mean_skipna(self):
def test_impl(S, skipna):
return S.mean(skipna=skipna)
hpat_func = hpat.jit(test_impl)
data_samples = [
[6, 6, 2, 1, 3, 3, 2, 1, 2],
[1.1, 0.3, 2.1, 1, 3, 0.3, 2.1, 1.1, 2.2],
[6, 6.1, 2.2, 1, 3, 3, 2.2, 1, 2],
[6, 6, np.nan, 2, np.nan, 1, 3, 3, np.inf, 2, 1, 2, np.inf],
[1.1, 0.3, np.nan, 1.0, np.inf, 0.3, 2.1, np.nan, 2.2, np.inf],
[1.1, 0.3, np.nan, 1, np.inf, 0, 1.1, np.nan, 2.2, np.inf, 2, 2],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.inf],
]
for skipna in [True, False]:
for data in data_samples:
S = pd.Series(data)
actual = hpat_func(S, skipna)
expected = test_impl(S, skipna)
if np.isnan(actual) or np.isnan(expected):
self.assertEqual(np.isnan(actual), np.isnan(expected))
else:
self.assertEqual(actual, expected)
def test_series_var1(self):
def test_impl(S):
return S.var()
hpat_func = hpat.jit(test_impl)
S = pd.Series([np.nan, 2., 3.])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_min(self):
def test_impl(S):
return S.min()
hpat_func = hpat.jit(test_impl)
# TODO type_min/type_max
for input_data in [[np.nan, 2., np.nan, 3., np.inf, 1, -1000],
[8, 31, 1123, -1024],
[2., 3., 1, -1000, np.inf]]:
S = pd.Series(input_data)
result_ref = test_impl(S)
result = hpat_func(S)
self.assertEqual(result, result_ref)
@unittest.skipIf(hpat.config.config_pipeline_hpat_default, "Series.min() any parameters unsupported")
def test_series_min_param(self):
def test_impl(S, param_skipna):
return S.min(skipna=param_skipna)
hpat_func = hpat.jit(test_impl)
for input_data, param_skipna in [([np.nan, 2., np.nan, 3., 1, -1000, np.inf], True),
([2., 3., 1, np.inf, -1000], False)]:
S = pd.Series(input_data)
result_ref = test_impl(S, param_skipna)
result = hpat_func(S, param_skipna)
self.assertEqual(result, result_ref)
def test_series_max(self):
def test_impl(S):
return S.max()
hpat_func = hpat.jit(test_impl)
# TODO type_min/type_max
for input_data in [[np.nan, 2., np.nan, 3., np.inf, 1, -1000],
[8, 31, 1123, -1024],
[2., 3., 1, -1000, np.inf]]:
S = pd.Series(input_data)
result_ref = test_impl(S)
result = hpat_func(S)
self.assertEqual(result, result_ref)
@unittest.skipIf(hpat.config.config_pipeline_hpat_default, "Series.max() any parameters unsupported")
def test_series_max_param(self):
def test_impl(S, param_skipna):
return S.max(skipna=param_skipna)
hpat_func = hpat.jit(test_impl)
for input_data, param_skipna in [([np.nan, 2., np.nan, 3., 1, -1000, np.inf], True),
([2., 3., 1, np.inf, -1000], False)]:
S = pd.Series(input_data)
result_ref = test_impl(S, param_skipna)
result = hpat_func(S, param_skipna)
self.assertEqual(result, result_ref)
def test_series_value_counts(self):
def test_impl(S):
return S.value_counts()
hpat_func = hpat.jit(test_impl)
S = pd.Series(['AA', 'BB', 'C', 'AA', 'C', 'AA'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_dist_input1(self):
'''Verify distribution of a Series without index'''
def test_impl(S):
return S.max()
hpat_func = hpat.jit(distributed={'S'})(test_impl)
n = 111
S = pd.Series(np.arange(n))
start, end = get_start_end(n)
self.assertEqual(hpat_func(S[start:end]), test_impl(S))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
def test_series_dist_input2(self):
'''Verify distribution of a Series with integer index'''
def test_impl(S):
return S.max()
hpat_func = hpat.jit(distributed={'S'})(test_impl)
n = 111
S = pd.Series(np.arange(n), 1 + np.arange(n))
start, end = get_start_end(n)
self.assertEqual(hpat_func(S[start:end]), test_impl(S))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
@unittest.skip("Passed if run single")
def test_series_dist_input3(self):
'''Verify distribution of a Series with string index'''
def test_impl(S):
return S.max()
hpat_func = hpat.jit(distributed={'S'})(test_impl)
n = 111
S = pd.Series(np.arange(n), ['abc{}'.format(id) for id in range(n)])
start, end = get_start_end(n)
self.assertEqual(hpat_func(S[start:end]), test_impl(S))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
def test_series_tuple_input1(self):
def test_impl(s_tup):
return s_tup[0].max()
hpat_func = hpat.jit(test_impl)
n = 111
S = pd.Series(np.arange(n))
S2 = pd.Series(np.arange(n) + 1.0)
s_tup = (S, 1, S2)
self.assertEqual(hpat_func(s_tup), test_impl(s_tup))
@unittest.skip("pending handling of build_tuple in dist pass")
def test_series_tuple_input_dist1(self):
def test_impl(s_tup):
return s_tup[0].max()
hpat_func = hpat.jit(locals={'s_tup:input': 'distributed'})(test_impl)
n = 111
S = pd.Series(np.arange(n))
S2 = pd.Series(np.arange(n) + 1.0)
start, end = get_start_end(n)
s_tup = (S, 1, S2)
h_s_tup = (S[start:end], 1, S2[start:end])
self.assertEqual(hpat_func(h_s_tup), test_impl(s_tup))
def test_series_rolling1(self):
def test_impl(S):
return S.rolling(3).sum()
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2., 3., 4., 5.])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_concat1(self):
def test_impl(S1, S2):
return pd.concat([S1, S2]).values
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2., 3., 4., 5.])
S2 = pd.Series([6., 7.])
np.testing.assert_array_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_map1(self):
def test_impl(S):
return S.map(lambda a: 2 * a)
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2., 3., 4., 5.])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_map_global1(self):
def test_impl(S):
return S.map(lambda a: a + GLOBAL_VAL)
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2., 3., 4., 5.])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_map_tup1(self):
def test_impl(S):
return S.map(lambda a: (a, 2 * a))
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2., 3., 4., 5.])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_map_tup_map1(self):
def test_impl(S):
A = S.map(lambda a: (a, 2 * a))
return A.map(lambda a: a[1])
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2., 3., 4., 5.])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_combine(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2., 3., 4., 5.])
S2 = pd.Series([6.0, 21., 3.6, 5.])
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_combine_float3264(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([np.float64(1), np.float64(2),
np.float64(3), np.float64(4), np.float64(5)])
S2 = pd.Series([np.float32(1), np.float32(2),
np.float32(3), np.float32(4), np.float32(5)])
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_combine_assert1(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1, 2, 3])
S2 = pd.Series([6., 21., 3., 5.])
with self.assertRaises(AssertionError):
hpat_func(S1, S2)
def test_series_combine_assert2(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([6., 21., 3., 5.])
S2 = pd.Series([1, 2, 3])
with self.assertRaises(AssertionError):
hpat_func(S1, S2)
def test_series_combine_integer(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b, 16)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1, 2, 3, 4, 5])
S2 = pd.Series([6, 21, 3, 5])
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_combine_different_types(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([6.1, 21.2, 3.3, 5.4, 6.7])
S2 = pd.Series([1, 2, 3, 4, 5])
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_combine_integer_samelen(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1, 2, 3, 4, 5])
S2 = pd.Series([6, 21, 17, -5, 4])
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_combine_samelen(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2., 3., 4., 5.])
S2 = pd.Series([6.0, 21., 3.6, 5., 0.0])
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_combine_value(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b, 1237.56)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2., 3., 4., 5.])
S2 = pd.Series([6.0, 21., 3.6, 5.])
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_combine_value_samelen(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b, 1237.56)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2., 3., 4., 5.])
S2 = pd.Series([6.0, 21., 3.6, 5., 0.0])
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_apply1(self):
def test_impl(S):
return S.apply(lambda a: 2 * a)
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2., 3., 4., 5.])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_abs1(self):
def test_impl(S):
return S.abs()
hpat_func = hpat.jit(test_impl)
S = pd.Series([np.nan, -2., 3., 0.5E-01, 0xFF, 0o7, 0b101])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_cov1(self):
def test_impl(S1, S2):
return S1.cov(S2)
hpat_func = hpat.jit(test_impl)
for pair in _cov_corr_series:
S1, S2 = pair
np.testing.assert_almost_equal(
hpat_func(S1, S2), test_impl(S1, S2),
err_msg='S1={}\nS2={}'.format(S1, S2))
def test_series_corr1(self):
def test_impl(S1, S2):
return S1.corr(S2)
hpat_func = hpat.jit(test_impl)
for pair in _cov_corr_series:
S1, S2 = pair
np.testing.assert_almost_equal(
hpat_func(S1, S2), test_impl(S1, S2),
err_msg='S1={}\nS2={}'.format(S1, S2))
def test_series_str_len1(self):
def test_impl(S):
return S.str.len()
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'abc', 'c', 'cccd'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_str2str(self):
str2str_methods = ('capitalize', 'lower', 'lstrip', 'rstrip',
'strip', 'swapcase', 'title', 'upper')
for method in str2str_methods:
func_text = "def test_impl(S):\n"
func_text += " return S.str.{}()\n".format(method)
test_impl = _make_func_from_text(func_text)
hpat_func = hpat.jit(test_impl)
S = pd.Series([' \tbbCD\t ', 'ABC', ' mCDm\t', 'abc'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_append1(self):
def test_impl(S, other):
return S.append(other).values
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([-2., 3., 9.1])
S2 = pd.Series([-2., 5.0])
# Test single series
np.testing.assert_array_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_append2(self):
def test_impl(S1, S2, S3):
return S1.append([S2, S3]).values
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([-2., 3., 9.1])
S2 = pd.Series([-2., 5.0])
S3 = pd.Series([1.0])
# Test series tuple
np.testing.assert_array_equal(hpat_func(S1, S2, S3),
test_impl(S1, S2, S3))
def test_series_isin_list1(self):
def test_impl(S, values):
return S.isin(values)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
values = [1, 2, 5, 7, 8]
pd.testing.assert_series_equal(hpat_func(S, values), test_impl(S, values))
def test_series_isin_list2(self):
def test_impl(S, values):
return S.isin(values)
hpat_func = hpat.jit(test_impl)
n = 11.0
S = pd.Series(np.arange(n))
values = [1., 2., 5., 7., 8.]
pd.testing.assert_series_equal(hpat_func(S, values), test_impl(S, values))
def test_series_isin_list3(self):
def test_impl(S, values):
return S.isin(values)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['a', 'b', 'q', 'w', 'c', 'd', 'e', 'r'])
values = ['a', 'q', 'c', 'd', 'e']
pd.testing.assert_series_equal(hpat_func(S, values), test_impl(S, values))
def test_series_isin_set1(self):
def test_impl(S, values):
return S.isin(values)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
values = {1, 2, 5, 7, 8}
pd.testing.assert_series_equal(hpat_func(S, values), test_impl(S, values))
def test_series_isin_set2(self):
def test_impl(S, values):
return S.isin(values)
hpat_func = hpat.jit(test_impl)
n = 11.0
S = pd.Series(np.arange(n))
values = {1., 2., 5., 7., 8.}
pd.testing.assert_series_equal(hpat_func(S, values), test_impl(S, values))
@unittest.skip('TODO: requires hashable unicode strings in Numba')
def test_series_isin_set3(self):
def test_impl(S, values):
return S.isin(values)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['a', 'b', 'c', 'd', 'e'] * 2)
values = {'b', 'c', 'e'}
pd.testing.assert_series_equal(hpat_func(S, values), test_impl(S, values))
def test_series_isna1(self):
def test_impl(S):
return S.isna()
hpat_func = hpat.jit(test_impl)
# column with NA
S = pd.Series([np.nan, 2., 3., np.inf])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_isnull1(self):
def test_impl(S):
return S.isnull()
hpat_func = hpat.jit(test_impl)
# column with NA
S = pd.Series([np.nan, 2., 3.])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_isnull_full(self):
def test_impl(series):
return series.isnull()
hpat_func = hpat.jit(test_impl)
for data in test_global_input_data_numeric + [test_global_input_data_unicode_kind4]:
series = pd.Series(data * 3)
ref_result = test_impl(series)
jit_result = hpat_func(series)
pd.testing.assert_series_equal(ref_result, jit_result)
def test_series_notna1(self):
def test_impl(S):
return S.notna()
hpat_func = hpat.jit(test_impl)
# column with NA
S = pd.Series([np.nan, 2., 3.])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_notna_noidx_float(self):
def test_impl(S):
return S.notna()
hpat_func = hpat.jit(test_impl)
for input_data in test_global_input_data_float64:
S = pd.Series(input_data)
result_ref = test_impl(S)
result_jit = hpat_func(S)
pd.testing.assert_series_equal(result_jit, result_ref)
@unittest.skip("Need fix test_global_input_data_integer64")
def test_series_notna_noidx_int(self):
def test_impl(S):
return S.notna()
hpat_func = hpat.jit(test_impl)
for input_data in test_global_input_data_integer64:
S = pd.Series(input_data)
result_ref = test_impl(S)
result_jit = hpat_func(S)
pd.testing.assert_series_equal(result_jit, result_ref)
@unittest.skip("Need fix test_global_input_data_integer64")
def test_series_notna_noidx_num(self):
def test_impl(S):
return S.notna()
hpat_func = hpat.jit(test_impl)
for input_data in test_global_input_data_numeric:
S = pd.Series(input_data)
result_ref = test_impl(S)
result_jit = hpat_func(S)
pd.testing.assert_series_equal(result_jit, result_ref)
def test_series_notna_noidx_str(self):
def test_impl(S):
return S.notna()
hpat_func = hpat.jit(test_impl)
input_data = test_global_input_data_unicode_kind4
S = pd.Series(input_data)
result_ref = test_impl(S)
result_jit = hpat_func(S)
pd.testing.assert_series_equal(result_jit, result_ref)
def test_series_str_notna(self):
def test_impl(S):
return S.notna()
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', None, 'c', 'cccd'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_str_isna1(self):
def test_impl(S):
return S.isna()
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', None, 'c', 'cccd'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('AssertionError: Series are different')
def test_series_dt_isna1(self):
def test_impl(S):
return S.isna()
hpat_func = hpat.jit(test_impl)
S = pd.Series([pd.NaT, pd.Timestamp('1970-12-01'), pd.Timestamp('2012-07-25')])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_nlargest1(self):
def test_impl(S):
return S.nlargest(4)
hpat_func = hpat.jit(test_impl)
m = 100
np.random.seed(0)
S = pd.Series(np.random.randint(-30, 30, m))
np.testing.assert_array_equal(hpat_func(S).values, test_impl(S).values)
def test_series_nlargest_default1(self):
def test_impl(S):
return S.nlargest()
hpat_func = hpat.jit(test_impl)
m = 100
np.random.seed(0)
S = pd.Series(np.random.randint(-30, 30, m))
np.testing.assert_array_equal(hpat_func(S).values, test_impl(S).values)
def test_series_nlargest_nan1(self):
def test_impl(S):
return S.nlargest(4)
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, np.nan, 3.0, 2.0, np.nan, 4.0])
np.testing.assert_array_equal(hpat_func(S).values, test_impl(S).values)
def test_series_nlargest_parallel1(self):
# create `kde.parquet` file
ParquetGenerator.gen_kde_pq()
def test_impl():
df = pq.read_table('kde.parquet').to_pandas()
S = df.points
return S.nlargest(4)
hpat_func = hpat.jit(test_impl)
np.testing.assert_array_equal(hpat_func().values, test_impl().values)
@unittest.skip('Unsupported functionality: failed to handle index')
def test_series_nlargest_index_str(self):
def test_impl(S):
return S.nlargest(4)
hpat_func = hpat.jit(test_impl)
S = pd.Series([73, 21, 10005, 5, 1], index=['a', 'b', 'c', 'd', 'e'])
np.testing.assert_array_equal(hpat_func(S).values, test_impl(S).values)
@unittest.skip('Unsupported functionality: failed to handle index')
def test_series_nlargest_index_int(self):
def test_impl(S):
return S.nlargest(4)
hpat_func = hpat.jit(test_impl)
S = pd.Series([73, 21, 10005, 5, 1], index=[2, 3, 4, 5, 6])
np.testing.assert_array_equal(hpat_func(S).values, test_impl(S).values)
def test_series_nsmallest1(self):
def test_impl(S):
return S.nsmallest(4)
hpat_func = hpat.jit(test_impl)
m = 100
np.random.seed(0)
S = pd.Series(np.random.randint(-30, 30, m))
np.testing.assert_array_equal(hpat_func(S).values, test_impl(S).values)
def test_series_nsmallest_default1(self):
def test_impl(S):
return S.nsmallest()
hpat_func = hpat.jit(test_impl)
m = 100
np.random.seed(0)
S = pd.Series(np.random.randint(-30, 30, m))
np.testing.assert_array_equal(hpat_func(S).values, test_impl(S).values)
def test_series_nsmallest_nan1(self):
def test_impl(S):
return S.nsmallest(4)
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, np.nan, 3.0, 2.0, np.nan, 4.0])
np.testing.assert_array_equal(hpat_func(S).values, test_impl(S).values)
def test_series_nsmallest_parallel1(self):
# create `kde.parquet` file
ParquetGenerator.gen_kde_pq()
def test_impl():
df = pq.read_table('kde.parquet').to_pandas()
S = df.points
return S.nsmallest(4)
hpat_func = hpat.jit(test_impl)
np.testing.assert_array_equal(hpat_func().values, test_impl().values)
@unittest.skip('Unsupported functionality: failed to handle index')
def test_series_nsmallest_index_str(self):
def test_impl(S):
return S.nsmallest(3)
hpat_func = hpat.jit(test_impl)
S = pd.Series([41, 32, 33, 4, 5], index=['a', 'b', 'c', 'd', 'e'])
np.testing.assert_array_equal(hpat_func(S).values, test_impl(S).values)
@unittest.skip('Unsupported functionality: failed to handle index')
def test_series_nsmallest_index_int(self):
def test_impl(S):
return S.nsmallest(3)
hpat_func = hpat.jit(test_impl)
S = pd.Series([41, 32, 33, 4, 5], index=[1, 2, 3, 4, 5])
np.testing.assert_array_equal(hpat_func(S).values, test_impl(S).values)
def test_series_head1(self):
def test_impl(S):
return S.head(4)
hpat_func = hpat.jit(test_impl)
m = 100
np.random.seed(0)
S = pd.Series(np.random.randint(-30, 30, m))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_head_default1(self):
'''Verifies default head method for non-distributed pass of Series with no index'''
def test_impl(S):
return S.head()
hpat_func = hpat.jit(test_impl)
m = 100
np.random.seed(0)
S = pd.Series(np.random.randint(-30, 30, m))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_head_index1(self):
'''Verifies head method for Series with integer index created inside jitted function'''
def test_impl():
S = pd.Series([6, 9, 2, 3, 6, 4, 5], [8, 1, 6, 0, 9, 1, 3])
return S.head(3)
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_series_head_index2(self):
'''Verifies head method for Series with string index created inside jitted function'''
def test_impl():
S = pd.Series([6, 9, 2, 3, 6, 4, 5], ['a', 'ab', 'abc', 'c', 'f', 'hh', ''])
return S.head(3)
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_series_head_index3(self):
'''Verifies head method for non-distributed pass of Series with integer index'''
def test_impl(S):
return S.head(3)
hpat_func = hpat.jit(test_impl)
S = pd.Series([6, 9, 2, 3, 6, 4, 5], [8, 1, 6, 0, 9, 1, 3])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip("Passed if run single")
def test_series_head_index4(self):
'''Verifies head method for non-distributed pass of Series with string index'''
def test_impl(S):
return S.head(3)
hpat_func = hpat.jit(test_impl)
S = pd.Series([6, 9, 2, 4, 6, 4, 5], ['a', 'ab', 'abc', 'c', 'f', 'hh', ''])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_head_parallel1(self):
'''Verifies head method for distributed Series with string data and no index'''
def test_impl(S):
return S.head(7)
hpat_func = hpat.jit(distributed={'S'})(test_impl)
        # need to test different lengths, as head's size is fixed and implementation
# depends on relation of size of the data per processor to output data size
for n in range(1, 5):
S = pd.Series(['a', 'ab', 'abc', 'c', 'f', 'hh', ''] * n)
start, end = get_start_end(len(S))
pd.testing.assert_series_equal(hpat_func(S[start:end]), test_impl(S))
self.assertTrue(count_array_OneDs() > 0)
def test_series_head_index_parallel1(self):
'''Verifies head method for distributed Series with integer index'''
def test_impl(S):
return S.head(3)
hpat_func = hpat.jit(distributed={'S'})(test_impl)
S = pd.Series([6, 9, 2, 3, 6, 4, 5], [8, 1, 6, 0, 9, 1, 3])
start, end = get_start_end(len(S))
pd.testing.assert_series_equal(hpat_func(S[start:end]), test_impl(S))
self.assertTrue(count_array_OneDs() > 0)
@unittest.skip("Passed if run single")
def test_series_head_index_parallel2(self):
'''Verifies head method for distributed Series with string index'''
def test_impl(S):
return S.head(3)
hpat_func = hpat.jit(distributed={'S'})(test_impl)
S = pd.Series([6, 9, 2, 3, 6, 4, 5], ['a', 'ab', 'abc', 'c', 'f', 'hh', ''])
start, end = get_start_end(len(S))
pd.testing.assert_series_equal(hpat_func(S[start:end]), test_impl(S))
self.assertTrue(count_array_OneDs() > 0)
def test_series_head_noidx_float(self):
def test_impl(S, n):
return S.head(n)
hpat_func = hpat.jit(test_impl)
for input_data in test_global_input_data_float64:
S = pd.Series(input_data)
for n in [-1, 0, 2, 3]:
result_ref = test_impl(S, n)
result_jit = hpat_func(S, n)
pd.testing.assert_series_equal(result_jit, result_ref)
@unittest.skip("Need fix test_global_input_data_integer64")
def test_series_head_noidx_int(self):
def test_impl(S, n):
return S.head(n)
hpat_func = hpat.jit(test_impl)
for input_data in test_global_input_data_integer64:
S = pd.Series(input_data)
for n in [-1, 0, 2, 3]:
result_ref = test_impl(S, n)
result_jit = hpat_func(S, n)
pd.testing.assert_series_equal(result_jit, result_ref)
@unittest.skip("Need fix test_global_input_data_integer64")
def test_series_head_noidx_num(self):
def test_impl(S, n):
return S.head(n)
hpat_func = hpat.jit(test_impl)
for input_data in test_global_input_data_numeric:
S = pd.Series(input_data)
for n in [-1, 0, 2, 3]:
result_ref = test_impl(S, n)
result_jit = hpat_func(S, n)
pd.testing.assert_series_equal(result_jit, result_ref)
@unittest.skip("Old implementation not work with n negative and data str")
def test_series_head_noidx_str(self):
def test_impl(S, n):
return S.head(n)
hpat_func = hpat.jit(test_impl)
input_data = test_global_input_data_unicode_kind4
S = pd.Series(input_data)
for n in [-1, 0, 2, 3]:
result_ref = test_impl(S, n)
result_jit = hpat_func(S, n)
pd.testing.assert_series_equal(result_jit, result_ref)
@unittest.skip("Broke another three tests")
def test_series_head_idx(self):
def test_impl(S):
return S.head()
def test_impl_param(S, n):
return S.head(n)
hpat_func = hpat.jit(test_impl)
data_test = [[6, 6, 2, 1, 3, 3, 2, 1, 2],
[1.1, 0.3, 2.1, 1, 3, 0.3, 2.1, 1.1, 2.2],
[6, 6.1, 2.2, 1, 3, 0, 2.2, 1, 2],
['as', 'b', 'abb', 'sss', 'ytr65', '', 'qw', 'a', 'b'],
[6, 6, 2, 1, 3, np.inf, np.nan, np.nan, np.nan],
[3., 5.3, np.nan, np.nan, np.inf, np.inf, 4.4, 3.7, 8.9]
]
for input_data in data_test:
for index_data in data_test:
S = pd.Series(input_data, index_data)
result_ref = test_impl(S)
result = hpat_func(S)
pd.testing.assert_series_equal(result, result_ref)
hpat_func_param1 = hpat.jit(test_impl_param)
for param1 in [1, 3, 7]:
result_param1_ref = test_impl_param(S, param1)
result_param1 = hpat_func_param1(S, param1)
pd.testing.assert_series_equal(result_param1, result_param1_ref)
def test_series_median1(self):
'''Verifies median implementation for float and integer series of random data'''
def test_impl(S):
return S.median()
hpat_func = hpat.jit(test_impl)
m = 100
np.random.seed(0)
S = pd.Series(np.random.randint(-30, 30, m))
self.assertEqual(hpat_func(S), test_impl(S))
S = pd.Series(np.random.ranf(m))
self.assertEqual(hpat_func(S), test_impl(S))
# odd size
m = 101
S = pd.Series(np.random.randint(-30, 30, m))
self.assertEqual(hpat_func(S), test_impl(S))
S = pd.Series(np.random.ranf(m))
self.assertEqual(hpat_func(S), test_impl(S))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
"BUG: old-style median implementation doesn't filter NaNs")
def test_series_median_skipna_default1(self):
'''Verifies median implementation with default skipna=True argument on a series with NA values'''
def test_impl(S):
return S.median()
hpat_func = hpat.jit(test_impl)
S = pd.Series([2., 3., 5., np.nan, 5., 6., 7.])
self.assertEqual(hpat_func(S), test_impl(S))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
"Skipna argument is not supported in old-style")
def test_series_median_skipna_false1(self):
'''Verifies median implementation with skipna=False on a series with NA values'''
def test_impl(S):
return S.median(skipna=False)
hpat_func = hpat.jit(test_impl)
# np.inf is not NaN, so verify that a correct number is returned
S1 = pd.Series([2., 3., 5., np.inf, 5., 6., 7.])
self.assertEqual(hpat_func(S1), test_impl(S1))
# TODO: both return values are 'nan', but HPAT's is not np.nan, hence checking with
        # assertIs() doesn't work - check if it's Numba related
S2 = pd.Series([2., 3., 5., np.nan, 5., 6., 7.])
self.assertEqual(np.isnan(hpat_func(S2)), np.isnan(test_impl(S2)))
def test_series_median_parallel1(self):
# create `kde.parquet` file
ParquetGenerator.gen_kde_pq()
def test_impl():
df = pq.read_table('kde.parquet').to_pandas()
S = df.points
return S.median()
hpat_func = hpat.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
self.assertTrue(count_array_OneDs() > 0)
def test_series_argsort_parallel(self):
# create `kde.parquet` file
ParquetGenerator.gen_kde_pq()
def test_impl():
df = pq.read_table('kde.parquet').to_pandas()
S = df.points
return S.argsort().values
hpat_func = hpat.jit(test_impl)
np.testing.assert_array_equal(hpat_func(), test_impl())
def test_series_idxmin1(self):
def test_impl(A):
return A.idxmin()
hpat_func = hpat.jit(test_impl)
n = 11
np.random.seed(0)
S = pd.Series(np.random.ranf(n))
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
def test_series_idxmin_str(self):
def test_impl(S):
return S.idxmin()
hpat_func = hpat.jit(test_impl)
S = pd.Series([8, 6, 34, np.nan], ['a', 'ab', 'abc', 'c'])
self.assertEqual(hpat_func(S), test_impl(S))
@unittest.skip("Skipna is not implemented")
def test_series_idxmin_str_idx(self):
def test_impl(S):
return S.idxmin(skipna=False)
hpat_func = hpat.jit(test_impl)
S = pd.Series([8, 6, 34, np.nan], ['a', 'ab', 'abc', 'c'])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_idxmin_no(self):
def test_impl(S):
return S.idxmin()
hpat_func = hpat.jit(test_impl)
S = pd.Series([8, 6, 34, np.nan])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_idxmin_int(self):
def test_impl(S):
return S.idxmin()
hpat_func = hpat.jit(test_impl)
S = pd.Series([1, 2, 3], [4, 45, 14])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_idxmin_noidx(self):
def test_impl(S):
return S.idxmin()
hpat_func = hpat.jit(test_impl)
data_test = [[6, 6, 2, 1, 3, 3, 2, 1, 2],
[1.1, 0.3, 2.1, 1, 3, 0.3, 2.1, 1.1, 2.2],
[6, 6.1, 2.2, 1, 3, 0, 2.2, 1, 2],
[6, 6, 2, 1, 3, np.inf, np.nan, np.nan, np.nan],
[3., 5.3, np.nan, np.nan, np.inf, np.inf, 4.4, 3.7, 8.9]
]
for input_data in data_test:
S = pd.Series(input_data)
result_ref = test_impl(S)
result = hpat_func(S)
self.assertEqual(result, result_ref)
def test_series_idxmin_idx(self):
def test_impl(S):
return S.idxmin()
hpat_func = hpat.jit(test_impl)
data_test = [[6, 6, 2, 1, 3, 3, 2, 1, 2],
[1.1, 0.3, 2.1, 1, 3, 0.3, 2.1, 1.1, 2.2],
[6, 6.1, 2.2, 1, 3, 0, 2.2, 1, 2],
[6, 6, 2, 1, 3, -np.inf, np.nan, np.inf, np.nan],
[3., 5.3, np.nan, np.nan, np.inf, np.inf, 4.4, 3.7, 8.9]
]
for input_data in data_test:
for index_data in data_test:
S = pd.Series(input_data, index_data)
result_ref = test_impl(S)
result = hpat_func(S)
if np.isnan(result) or np.isnan(result_ref):
self.assertEqual(np.isnan(result), np.isnan(result_ref))
else:
self.assertEqual(result, result_ref)
def test_series_idxmax1(self):
def test_impl(A):
return A.idxmax()
hpat_func = hpat.jit(test_impl)
n = 11
np.random.seed(0)
S = pd.Series(np.random.ranf(n))
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
@unittest.skip("Skipna is not implemented")
def test_series_idxmax_str_idx(self):
def test_impl(S):
return S.idxmax(skipna=False)
hpat_func = hpat.jit(test_impl)
S = pd.Series([8, 6, 34, np.nan], ['a', 'ab', 'abc', 'c'])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_idxmax_noidx(self):
def test_impl(S):
return S.idxmax()
hpat_func = hpat.jit(test_impl)
data_test = [[6, 6, 2, 1, 3, 3, 2, 1, 2],
[1.1, 0.3, 2.1, 1, 3, 0.3, 2.1, 1.1, 2.2],
[6, 6.1, 2.2, 1, 3, 0, 2.2, 1, 2],
[6, 6, 2, 1, 3, np.inf, np.nan, np.inf, np.nan],
[3., 5.3, np.nan, np.nan, np.inf, np.inf, 4.4, 3.7, 8.9]
]
for input_data in data_test:
S = pd.Series(input_data)
result_ref = test_impl(S)
result = hpat_func(S)
self.assertEqual(result, result_ref)
def test_series_idxmax_idx(self):
def test_impl(S):
return S.idxmax()
hpat_func = hpat.jit(test_impl)
data_test = [[6, 6, 2, 1, 3, 3, 2, 1, 2],
[1.1, 0.3, 2.1, 1, 3, 0.3, 2.1, 1.1, 2.2],
[6, 6.1, 2.2, 1, 3, 0, 2.2, 1, 2],
[6, 6, 2, 1, 3, np.nan, np.nan, np.nan, np.nan],
[3., 5.3, np.nan, np.nan, np.inf, np.inf, 4.4, 3.7, 8.9]
]
for input_data in data_test:
for index_data in data_test:
S = pd.Series(input_data, index_data)
result_ref = test_impl(S)
result = hpat_func(S)
if np.isnan(result) or np.isnan(result_ref):
self.assertEqual(np.isnan(result), np.isnan(result_ref))
else:
self.assertEqual(result, result_ref)
def test_series_sort_values1(self):
def test_impl(A):
return A.sort_values()
hpat_func = hpat.jit(test_impl)
n = 11
np.random.seed(0)
S = pd.Series(np.random.ranf(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_sort_values_index1(self):
def test_impl(A, B):
S = pd.Series(A, B)
return S.sort_values()
hpat_func = hpat.jit(test_impl)
n = 11
np.random.seed(0)
# TODO: support passing Series with Index
# S = pd.Series(np.random.ranf(n), np.random.randint(0, 100, n))
A = np.random.ranf(n)
B = np.random.ranf(n)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B))
def test_series_sort_values_parallel1(self):
# create `kde.parquet` file
ParquetGenerator.gen_kde_pq()
def test_impl():
df = pq.read_table('kde.parquet').to_pandas()
S = df.points
return S.sort_values()
hpat_func = hpat.jit(test_impl)
np.testing.assert_array_equal(hpat_func(), test_impl())
def test_series_shift(self):
def pyfunc():
series = pd.Series([1.0, np.nan, -1.0, 0.0, 5e-324])
return series.shift()
cfunc = hpat.jit(pyfunc)
pd.testing.assert_series_equal(cfunc(), pyfunc())
def test_series_shift_unboxing(self):
def pyfunc(series):
return series.shift()
cfunc = hpat.jit(pyfunc)
for data in test_global_input_data_float64:
series = pd.Series(data)
pd.testing.assert_series_equal(cfunc(series), pyfunc(series))
def test_series_shift_full(self):
def pyfunc(series, periods, freq, axis, fill_value):
return series.shift(periods=periods, freq=freq, axis=axis, fill_value=fill_value)
cfunc = hpat.jit(pyfunc)
freq = None
axis = 0
for data in test_global_input_data_float64:
series = pd.Series(data)
for periods in [-2, 0, 3]:
for fill_value in [9.1, np.nan, -3.3, None]:
jit_result = cfunc(series, periods, freq, axis, fill_value)
ref_result = pyfunc(series, periods, freq, axis, fill_value)
pd.testing.assert_series_equal(jit_result, ref_result)
def test_series_shift_str(self):
def pyfunc(series):
return series.shift()
cfunc = hpat.jit(pyfunc)
series = pd.Series(test_global_input_data_unicode_kind4)
with self.assertRaises(TypingError) as raises:
cfunc(series)
msg = 'Method shift(). The object must be a number. Given self.data.dtype: {}'
self.assertIn(msg.format(types.unicode_type), str(raises.exception))
def test_series_shift_fill_str(self):
def pyfunc(series, fill_value):
return series.shift(fill_value=fill_value)
cfunc = hpat.jit(pyfunc)
series = pd.Series(test_global_input_data_float64[0])
with self.assertRaises(TypingError) as raises:
cfunc(series, fill_value='unicode')
msg = 'Method shift(). The object must be a number. Given fill_value: {}'
self.assertIn(msg.format(types.unicode_type), str(raises.exception))
def test_series_shift_unsupported_params(self):
def pyfunc(series, freq, axis):
return series.shift(freq=freq, axis=axis)
cfunc = hpat.jit(pyfunc)
series = pd.Series(test_global_input_data_float64[0])
with self.assertRaises(TypingError) as raises:
cfunc(series, freq='12H', axis=0)
msg = 'Method shift(). Unsupported parameters. Given freq: {}'
self.assertIn(msg.format(types.unicode_type), str(raises.exception))
with self.assertRaises(TypingError) as raises:
cfunc(series, freq=None, axis=1)
msg = 'Method shift(). Unsupported parameters. Given axis != 0'
self.assertIn(msg, str(raises.exception))
@unittest.skip('Unsupported functionality: failed to handle index')
def test_series_shift_index_str(self):
def test_impl(S):
return S.shift()
hpat_func = hpat.jit(test_impl)
S = pd.Series([np.nan, 2., 3., 5., np.nan, 6., 7.], index=['a', 'b', 'c', 'd', 'e', 'f', 'g'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('Unsupported functionality: failed to handle index')
def test_series_shift_index_int(self):
def test_impl(S):
return S.shift()
hpat_func = hpat.jit(test_impl)
S = pd.Series([np.nan, 2., 3., 5., np.nan, 6., 7.], index=[1, 2, 3, 4, 5, 6, 7])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_index1(self):
def test_impl():
A = pd.Series([1, 2, 3], index=['A', 'C', 'B'])
return A.index
hpat_func = hpat.jit(test_impl)
np.testing.assert_array_equal(hpat_func(), test_impl())
def test_series_index2(self):
def test_impl():
A = pd.Series([1, 2, 3], index=[0, 1, 2])
return A.index
hpat_func = hpat.jit(test_impl)
np.testing.assert_array_equal(hpat_func(), test_impl())
def test_series_index3(self):
def test_impl():
A = pd.Series([1, 2, 3])
return A.index
hpat_func = hpat.jit(test_impl)
np.testing.assert_array_equal(hpat_func(), test_impl())
def test_series_take_index_default(self):
def pyfunc():
series = pd.Series([1.0, 13.0, 9.0, -1.0, 7.0])
indices = [1, 3]
return series.take(indices)
cfunc = hpat.jit(pyfunc)
ref_result = pyfunc()
result = cfunc()
pd.testing.assert_series_equal(ref_result, result)
def test_series_take_index_default_unboxing(self):
def pyfunc(series, indices):
return series.take(indices)
cfunc = hpat.jit(pyfunc)
series = pd.Series([1.0, 13.0, 9.0, -1.0, 7.0])
indices = [1, 3]
ref_result = pyfunc(series, indices)
result = cfunc(series, indices)
pd.testing.assert_series_equal(ref_result, result)
def test_series_take_index_int(self):
def pyfunc():
series = pd.Series([1.0, 13.0, 9.0, -1.0, 7.0], index=[3, 0, 4, 2, 1])
indices = [1, 3]
return series.take(indices)
cfunc = hpat.jit(pyfunc)
ref_result = pyfunc()
result = cfunc()
pd.testing.assert_series_equal(ref_result, result)
def test_series_take_index_int_unboxing(self):
def pyfunc(series, indices):
return series.take(indices)
cfunc = hpat.jit(pyfunc)
series = pd.Series([1.0, 13.0, 9.0, -1.0, 7.0], index=[3, 0, 4, 2, 1])
indices = [1, 3]
ref_result = pyfunc(series, indices)
result = cfunc(series, indices)
pd.testing.assert_series_equal(ref_result, result)
def test_series_take_index_str(self):
def pyfunc():
series = pd.Series([1.0, 13.0, 9.0, -1.0, 7.0], index=['test', 'series', 'take', 'str', 'index'])
indices = [1, 3]
return series.take(indices)
cfunc = hpat.jit(pyfunc)
ref_result = pyfunc()
result = cfunc()
pd.testing.assert_series_equal(ref_result, result)
def test_series_take_index_str_unboxing(self):
def pyfunc(series, indices):
return series.take(indices)
cfunc = hpat.jit(pyfunc)
series = pd.Series([1.0, 13.0, 9.0, -1.0, 7.0], index=['test', 'series', 'take', 'str', 'index'])
indices = [1, 3]
ref_result = pyfunc(series, indices)
result = cfunc(series, indices)
pd.testing.assert_series_equal(ref_result, result)
def test_series_iterator_int(self):
def test_impl(A):
return [i for i in A]
A = pd.Series([3, 2, 1, 5, 4])
hpat_func = hpat.jit(test_impl)
np.testing.assert_array_equal(hpat_func(A), test_impl(A))
def test_series_iterator_float(self):
def test_impl(A):
return [i for i in A]
A = pd.Series([0.3, 0.2222, 0.1756, 0.005, 0.4])
hpat_func = hpat.jit(test_impl)
np.testing.assert_array_equal(hpat_func(A), test_impl(A))
def test_series_iterator_boolean(self):
def test_impl(A):
return [i for i in A]
A = pd.Series([True, False])
hpat_func = hpat.jit(test_impl)
np.testing.assert_array_equal(hpat_func(A), test_impl(A))
def test_series_iterator_string(self):
def test_impl(A):
return [i for i in A]
A = pd.Series(['a', 'ab', 'abc', '', 'dddd'])
hpat_func = hpat.jit(test_impl)
np.testing.assert_array_equal(hpat_func(A), test_impl(A))
def test_series_iterator_one_value(self):
def test_impl(A):
return [i for i in A]
A = pd.Series([5])
hpat_func = hpat.jit(test_impl)
np.testing.assert_array_equal(hpat_func(A), test_impl(A))
@unittest.skip("Fails when NUMA_PES>=2 due to unimplemented sync of such construction after distribution")
def test_series_iterator_no_param(self):
def test_impl():
A = pd.Series([3, 2, 1, 5, 4])
return [i for i in A]
hpat_func = hpat.jit(test_impl)
np.testing.assert_array_equal(hpat_func(), test_impl())
def test_series_iterator_empty(self):
def test_impl(A):
return [i for i in A]
A = pd.Series([np.int64(x) for x in range(0)])
hpat_func = hpat.jit(test_impl)
np.testing.assert_array_equal(hpat_func(A), test_impl(A))
def test_series_default_index(self):
def test_impl():
A = pd.Series([3, 2, 1, 5, 4])
return A.index
hpat_func = hpat.jit(test_impl)
np.testing.assert_array_equal(hpat_func(), test_impl())
@unittest.skip("Implement drop_duplicates for Series")
def test_series_drop_duplicates(self):
def test_impl():
A = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'])
return A.drop_duplicates()
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_series_quantile(self):
def test_impl():
A = pd.Series([1, 2.5, .5, 3, 5])
return A.quantile()
hpat_func = hpat.jit(test_impl)
np.testing.assert_equal(hpat_func(), test_impl())
@unittest.skipIf(hpat.config.config_pipeline_hpat_default, "Series.quantile() parameter as a list unsupported")
def test_series_quantile_q_vector(self):
def test_series_quantile_q_vector_impl(S, param1):
return S.quantile(param1)
S = pd.Series(np.random.ranf(100))
hpat_func = hpat.jit(test_series_quantile_q_vector_impl)
param1 = [0.0, 0.25, 0.5, 0.75, 1.0]
result_ref = test_series_quantile_q_vector_impl(S, param1)
result = hpat_func(S, param1)
np.testing.assert_equal(result, result_ref)
@unittest.skip("Implement unique without sorting like in pandas")
def test_unique(self):
def test_impl(S):
return S.unique()
hpat_func = hpat.jit(test_impl)
S = pd.Series([2, 1, 3, 3])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_unique_sorted(self):
def test_impl(S):
return S.unique()
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
S[2] = 0
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
def test_unique_str(self):
def test_impl():
data = pd.Series(['aa', 'aa', 'b', 'b', 'cccc', 'dd', 'ddd', 'dd'])
return data.unique()
hpat_func = hpat.jit(test_impl)
        # since the order of the elements is different - check count of elements only
ref_result = test_impl().size
result = hpat_func().size
np.testing.assert_array_equal(ref_result, result)
def test_series_groupby_count(self):
def test_impl():
A = pd.Series([13, 11, 21, 13, 13, 51, 42, 21])
grouped = A.groupby(A, sort=False)
return grouped.count()
hpat_func = hpat.jit(test_impl)
ref_result = test_impl()
result = hpat_func()
| pd.testing.assert_series_equal(result, ref_result) | pandas.testing.assert_series_equal |
import matplotlib.pyplot as plt
import mysql.connector
import numpy as np
import os
import pandas as pd
import seaborn as sns
import warnings
from statsmodels.stats.outliers_influence import variance_inflation_factor
warnings.filterwarnings('ignore')
ROOT_PATH = os.path.dirname(os.path.abspath(__file__))
class DataProcessor:
def read_data_from_database(self, schema_path, loaded_path):
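        """Load the clicks, leads and offers tables from the local MySQL database,
        attach the column names from the schema CSVs and export each table to CSV."""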
mydb = mysql.connector.connect(
host="localhost",
user="root",
password="<PASSWORD>",
database="even_financial"
)
my_cursor = mydb.cursor()
my_cursor.execute('SELECT * FROM clicks')
table_rows = my_cursor.fetchall()
clicks_df = pd.DataFrame(table_rows)
my_cursor.execute('SELECT * FROM leads')
table_rows = my_cursor.fetchall()
leads_df = pd.DataFrame(table_rows)
my_cursor.execute('SELECT * FROM offers')
table_rows = my_cursor.fetchall()
offers_df = pd.DataFrame(table_rows)
click_schema = pd.read_csv(schema_path + 'clicks_schema.csv', header=1)
lead_schema = pd.read_csv(schema_path + 'leads_schema.csv', header=1)
offer_schema = pd.read_csv(schema_path + 'offers_schema.csv', header=1)
click_columns = click_schema.columns
lead_columns = lead_schema.columns
offer_columns = offer_schema.columns
clicks_df.columns = click_columns
leads_df.columns = lead_columns
offers_df.columns = offer_columns
clicks_df.to_csv(loaded_path + 'clicks.csv', index=False)
leads_df.to_csv(loaded_path + 'leads.csv', index=False)
offers_df.to_csv(loaded_path + 'offers.csv', index=False)
def generate_data_frame(self, loaded_path):
click_df = pd.read_csv(loaded_path + 'clicks.csv')
lead_df = | pd.read_csv(loaded_path + 'leads.csv') | pandas.read_csv |
import platform
from dataclasses import dataclass, field
import numpy as np
import pandas as pd
import rpy2.robjects as robjects
from pandas.tseries import offsets
from rpy2.robjects import numpy2ri, pandas2ri, r
from rpy2.robjects.packages import importr
from epysurv.metrics.outbreak_detection import ghozzi_score
def silence_r_output():
"""Silence output from R code.
    This is useful because some algorithms otherwise print output every time they are invoked.
"""
if platform.system() == "Linux":
r.sink("/dev/null")
elif platform.system() == "Windows":
r.sink("NUL")
silence_r_output()
numpy2ri.activate()
pandas2ri.activate()
surveillance = importr("surveillance")
@dataclass
class TimepointSurveillanceAlgorithm:
"""Algorithms that predict outbreaks for every timepoint."""
_training_data: pd.DataFrame = field(init=False, repr=False)
def fit(self, data: pd.DataFrame) -> "TimepointSurveillanceAlgorithm":
"""Expects data with time series index, case counts and outbreak labels."""
        # Remove outbreak cases from baseline.
data = data.copy()
data.n_cases -= data.n_outbreak_cases
self._training_data = data
return self
def predict(self, data: pd.DataFrame):
"""Expects data with time series index and case counts."""
self._data_in_the_future(data)
def score(self, data_with_labels: pd.DataFrame):
prediction_result = self.predict(data_with_labels)
return ghozzi_score(prediction_result)
def _validate_data(self, data: pd.DataFrame):
self._contains_dates(data)
self._contains_counts(data)
def _contains_dates(self, data: pd.DataFrame):
has_dates = "ds" in data.columns or isinstance(data.index, pd.DatetimeIndex)
if not has_dates:
raise ValueError("No dates")
def _contains_counts(self, data: pd.DataFrame):
if not {"n_cases", "n_outbreak_cases"} < set(data.columns):
raise ValueError('No column named "n_cases"')
def _data_in_the_future(self, data: pd.DataFrame):
if data.index.min() <= self._training_data.index.max():
raise ValueError("The prediction data overlaps with the training data.")
offset_to_freq = {
offsets.Day: 365,
offsets.Week: 52,
offsets.MonthBegin: 12,
offsets.MonthEnd: 12,
}
offset_to_attr = {
offsets.Day: "day",
offsets.Week: "week",
offsets.MonthBegin: "month",
offsets.MonthEnd: "month",
}
def _get_freq(data) -> int:
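    """Return the number of periods per year implied by the index frequency."""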
return offset_to_freq[type(data.index.freq)]
def _get_start_epoch(data: pd.DataFrame) -> int:
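    """Return the day/week/month number of the first timestamp, matching the index frequency."""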
return getattr(data.index[0], offset_to_attr[type(data.index.freq)])
class SurveillanceRPackageAlgorithm(TimepointSurveillanceAlgorithm):
"""Base class for the algorithm from the R package surveillance."""
def predict(self, data: pd.DataFrame) -> pd.DataFrame:
"""
Predict outbreaks.
Parameters
----------
data
Dataframe with DateTimeIndex containing the columns "n_cases".
Returns
-------
Original dataframe with "alarm" column added.
"""
super().predict(data)
        # Concatenate training and prediction data; make an index array for the range param.
full_data = (
| pd.concat((self._training_data, data), keys=["train", "test"]) | pandas.concat |
# imports
#region
import os
import pyreadstat
import pandas as pd
import numpy as np
from statsmodels.stats.weightstats import DescrStatsW
from sklearn.linear_model import LinearRegression
import statsmodels.api as sm
import statsmodels.formula.api as smf
import seaborn as sns
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from libs.utils import *
from libs.plots import *
from libs.extensions import *
plt.ioff()
#endregion
# load new EDGAR v5.0 data ---
root = 'D:\\projects\\fakta-o-klimatu\\work\\111-emise-svet-srovnani\\data'
edgar_files = ['CH4', 'CO2_excl_short-cycle_org_C', 'CO2_org_short-cycle_C', 'N2O']
ef = edgar_files[0]
edgar_df = None
for ef in edgar_files:
logger(ef)
ey = 2018 if ef == 'CO2_excl_short-cycle_org_C' else 2015
frame = pd.read_excel(f'{root}\\edgar_v5.0\\v50_{ef}_1970_{ey}.xls', sheet_name='TOTALS BY COUNTRY',
header=9)
frame = frame[['ISO_A3'] + list(range(1970, ey + 1))].rename(columns={'ISO_A3': 'code'}).set_index('code')
frame.columns = frame.columns.rename('year')
frame = frame.unstack().rename(f'edgar50_{ef}').reset_index()
frame = frame[~frame['code'].isin(['SEA', 'AIR'])]
if edgar_df is None:
edgar_df = frame
else:
edgar_df = pd.merge(edgar_df, frame, how='outer')
edgar_df.to_csv(root + '\\edgar_v5.0.csv', index=False)
edgar_df.show()
data = edgar_df.copy()
# find sensible GDP vs population vs CO2eq (or CO2) data vs time ?
root = 'D:\\projects\\fakta-o-klimatu\\work\\111-emise-svet-srovnani\\data'
df = pd.read_csv(root + '\\data_all.csv')
df.show_csv()
df.query('code == "CZE"').show_csv()
df = | pd.merge(df, edgar_df, how='left', on=['code', 'year']) | pandas.merge |
#!/usr/bin/python3
# somewhere you have imported your data in the form of lists
# in these examples the one called `tempo` contains the time stamps, while the one called `flusso` contains the data values
# I am using Italian words for time (tempo) and flux (flusso) to avoid confusion with keywords of the various calls used
# the object that will be created is named `ts_flusso`
#
# the following examples provide the minimum needed for working with these packages; for finer control
# you need to read the appropriate documentation of each package
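#
# a minimal, hypothetical data set so that the snippets below can be run as-is
# (purely an assumption for illustration - replace `tempo` and `flusso` with your own time stamps and measurements)
import numpy as np
import pandas as pd
tempo = list(pd.date_range('2024-01-01', periods=100, freq='min'))
flusso = list(np.random.rand(100))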
#Pandas
import pandas as pd
ts_flusso=pd.Series(data=flusso,index=tempo)
#stumpy
import stumpy
ts_flusso = pd.DataFrame(np.column_stack([tempo,flusso]),columns=['label_tempo','label_flusso'])
#sunpy
#the method for Pandas is the one that allows building an object compatible with sunpy
import pandas as pd
import sunpy
ts_flusso= | pd.Series(data=flusso,index=tempo) | pandas.Series |
import pandas as pd
import numpy as np
from sklearn.cluster import KMeans
import seaborn as sns
from networkx import *
import matplotlib.pyplot as plt
sns.set()
def distance(s_lat, s_lng, e_lat, e_lng):
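    """Great-circle (haversine) distance in kilometres between two (lat, lng) points given in degrees."""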
# approximate radius of earth in km
r = 6373.0
    s_lat = np.deg2rad(s_lat)
s_lng = np.deg2rad(s_lng)
e_lat = np.deg2rad(e_lat)
e_lng = np.deg2rad(e_lng)
d = np.sin((e_lat - s_lat) / 2) ** 2 + np.cos(s_lat) * np.cos(e_lat) * np.sin((e_lng - s_lng) / 2) ** 2
return 2 * r * np.arcsin(np.sqrt(d))
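# quick sanity check with illustrative coordinates (roughly New Delhi and Mumbai);
# the great-circle distance should come out on the order of 1150 km
_example_distance_km = distance(28.61, 77.21, 19.08, 72.88)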
def create_graph(df, auth, auth_lat, auth_long):
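    """Build a complete weighted graph over the centre `auth` and all points assigned to it,
    using great-circle distances as edge weights; a drawing is saved to `<auth>.png`."""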
g = nx.Graph()
df_copy = df[(df.Centername == auth)].copy().reset_index()
source = df_copy['identity'].tolist()
authlist = [auth, ]
source.extend(authlist)
g.add_nodes_from(source)
for index, c in df_copy.iterrows():
g.add_edge(auth, c['identity'], weight=distance(auth_lat, auth_long, c['Lat'], c['Long']))
for cindex, c in df_copy.iterrows():
for cindex1, c1 in df_copy.iterrows():
if c['identity'] == c1['identity']:
continue
g.add_edge(c['identity'], c1['identity'], weight=distance(c['Lat'], c['Long'], c1['Lat'], c1['Long']))
nx.draw(g)
plt.savefig("{}.png".format(auth))
return g
def graph_list(centers, df_final):
dict_of_graphs = {}
for index, a in centers.iterrows():
g = create_graph(df_final, a['Centername'], a['Center Lat'], a['Center Long'])
dict_of_graphs[a['Centername']] = g
return dict_of_graphs
def find_tsp(centers, df_tot, df_final):
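    """Solve a tour for every centre's graph and collect the visited coordinates into one dataframe."""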
dict_g = graph_list(centers, df_final)
df_path = pd.DataFrame(columns=['lat', 'long', 'auth_name'])
for source, g in dict_g.items():
path = find_best_path(g)
print(path)
for index in range(len(path)):
df = {'lat': df_tot[(df_tot.identity == path[index])].iloc[0].Lat,
'long': df_tot[(df_tot.identity == path[index])].iloc[0].Long,
'auth_name': source}
temp_df = pd.DataFrame([df])
df_path = pd.concat([df_path, temp_df], ignore_index=True)
return df_path
def find_best_path(g):
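    """Try every node as the start of a greedy nearest-neighbour tour and return the shortest closed tour found."""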
global smallestdis, best_tsp_path
all_tsp_paths = {}
for source in g.nodes:
path_calc = list(g.nodes)
path_calc.remove(source)
path = [source, ]
dis, path = find_path(g, source, source, path, path_calc)
all_tsp_paths[dis] = path
smallestdis = list(all_tsp_paths.keys())[0]
best_tsp_path = all_tsp_paths[smallestdis]
    # keep the tour with the smallest total distance seen so far
    for dis in all_tsp_paths.keys():
        if dis < smallestdis:
            smallestdis = dis
            best_tsp_path = all_tsp_paths[dis]
return best_tsp_path
def find_path(g, gsource, source, path, path_calc, totdis=0):
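    """Recursively extend the tour by always moving to the closest unvisited node;
    return the total distance and the closed path back to `gsource`."""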
if len(path_calc) == 1:
path.append(path_calc[0])
path.append(gsource)
totdis = totdis + nx.single_source_dijkstra(g, gsource, path_calc[0])[0]
return totdis, path
closest_node = path_calc[0]
dis = nx.single_source_dijkstra(g, source, closest_node)[0]
for node in path_calc:
tempdis = nx.single_source_dijkstra(g, source, node)[0]
if tempdis < dis:
closest_node = node
dis = tempdis
path.append(closest_node)
path_calc.remove(closest_node)
totdis = totdis + dis
totdis, path = find_path(g, gsource, closest_node, path, path_calc, totdis)
return totdis, path
def cluster_data(df_cit, df_auth):
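    """Cluster the citizens with KMeans (one cluster per shop) and snap each cluster centre
    to the nearest shop; return the merged dataframe and the centres."""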
km = KMeans(n_clusters=count_auth, random_state=101)
km.fit(X=df_cit[["Lat", "Long"]])
centers = pd.DataFrame(km.cluster_centers_, columns=["Center Lat", "Center Long"])
centers["Cluster"] = centers.index
df_cit["Cluster"] = km.labels_
for index, c in centers.iterrows():
clong = c['Center Long']
        clat = c['Center Lat']  # bracket access is needed because the column name contains a space
ds = []
for ind, auth in df_auth.iterrows():
authlong = auth.Long
authlat = auth.Lat
            distance_center = distance(clat, clong, authlat, authlong)  # match the (lat, lng, lat, lng) signature
ds.append(distance_center)
idx = np.argmin(np.array(ds))
centers.at[index, "Center Lat"] = df_auth.at[idx, "Lat"]
centers.at[index, "Center Long"] = df_auth.at[idx, "Long"]
centers.at[index, "Centername"] = df_auth.at[idx, "identity"]
df = pd.merge(df_cit, centers)
return df, centers
def get_dataframes(file_name):
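    """Read an Excel sheet of locations, label citizens as level 1 and shops as level 2
    (counting shops in the global `count_auth`), and return Lat/Long/identity/level columns."""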
global count_auth
df = pd.read_excel(file_name)
for index, c in df.iterrows():
if 'citizen' in c['identity']:
df.at[index, "level"] = '1'
elif 'shop' in c['identity']:
df.at[index, "level"] = '2'
count_auth = count_auth + 1
df_return = df.copy()[['latitude', 'longitude', 'identity', 'level']]
df_return = df_return.rename(columns={"longitude": "Long", 'latitude': "Lat", 'identity': "identity"})
return df_return
file_name_cit = "D:\\Documents\\project\\citizen.xlsx"
file_name_auth = "D:\\Documents\\project\\shop.xlsx"
count_auth = 0  # must be defined before get_dataframes() increments the global counter
df_cit = get_dataframes(file_name_cit)
count_auth = 0
df_auth = get_dataframes(file_name_auth)
print(count_auth)
df_tot = | pd.concat([df_cit, df_auth], ignore_index=True) | pandas.concat |
import copy
import logging
import os
import time
from collections import defaultdict
from typing import List, Union, Tuple
import networkx as nx
import numpy as np
import pandas as pd
import psutil
from .utils import process_hyperparameters
from ..augmentation.distill_utils import format_distillation_labels, augment_data
from ..constants import AG_ARGS, BINARY, MULTICLASS, REGRESSION, REFIT_FULL_NAME, REFIT_FULL_SUFFIX
from ..models import AbstractModel, BaggedEnsembleModel, StackerEnsembleModel, WeightedEnsembleModel, GreedyWeightedEnsembleModel, SimpleWeightedEnsembleModel
from ..features.feature_metadata import FeatureMetadata
from ..scheduler.scheduler_factory import scheduler_factory
from ..utils import default_holdout_frac, get_pred_from_proba, generate_train_test_split, infer_eval_metric, compute_permutation_feature_importance, extract_column, compute_weighted_metric
from ..utils.exceptions import TimeLimitExceeded, NotEnoughMemoryError, NoValidFeatures, NoGPUError
from ..utils.loaders import load_pkl
from ..utils.savers import save_json, save_pkl
from ..utils.feature_selection import FeatureSelector
logger = logging.getLogger(__name__)
# FIXME: Below is a major defect!
# Weird interaction for metrics like AUC during bagging.
# If kfold = 5, scores are 0.9, 0.85, 0.8, 0.75, and 0.7, the score is not 0.8! It is much lower because probs are combined together and AUC is recalculated
# Do we want this to happen? Should we calculate the score from 5 separate scores and then average them instead?
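# Illustrative sketch of the issue with hypothetical numbers (not from a real run):
#   from sklearn.metrics import roc_auc_score
#   y1, p1 = [0, 1], [0.10, 0.40]    # fold 1 alone: AUC = 1.0
#   y2, p2 = [0, 1], [0.60, 0.90]    # fold 2 alone: AUC = 1.0
#   roc_auc_score(y1 + y2, p1 + p2)  # pooled AUC = 0.75, below the per-fold average of 1.0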
# TODO: Dynamic model loading for ensemble models during prediction, only load more models if prediction is uncertain. This dynamically reduces inference time.
# TODO: Try midstack Semi-Supervised. Just take final models and re-train them, use bagged preds for SS rows. This would be very cheap and easy to try.
# TODO: Move to autogluon.core
class AbstractTrainer:
trainer_file_name = 'trainer.pkl'
trainer_info_name = 'info.pkl'
trainer_info_json_name = 'info.json'
distill_stackname = 'distill' # name of stack-level for distilled student models
def __init__(self, path: str, problem_type: str, eval_metric=None,
num_classes=None, quantile_levels=None, low_memory=False, feature_metadata=None, k_fold=0, n_repeats=1,
sample_weight=None, weight_evaluation=False, save_data=False, random_state=0, verbosity=2):
self.path = path
self.problem_type = problem_type
self.feature_metadata = feature_metadata
self.save_data = save_data
self.random_state = random_state # Integer value added to the stack level to get the random_state for kfold splits or the train/val split if bagging is disabled
self.verbosity = verbosity
        self.sample_weight = sample_weight  # TODO: consider redesign where Trainer doesn't need sample_weight column name and weights are separate from X
self.weight_evaluation = weight_evaluation
if eval_metric is not None:
self.eval_metric = eval_metric
else:
self.eval_metric = infer_eval_metric(problem_type=self.problem_type)
logger.log(25, f"AutoGluon will gauge predictive performance using evaluation metric: '{self.eval_metric.name}'")
if not (self.eval_metric.needs_pred or self.eval_metric.needs_quantile):
logger.log(25, "\tThis metric expects predicted probabilities rather than predicted class labels, so you'll need to use predict_proba() instead of predict()")
logger.log(20, "\tTo change this, specify the eval_metric argument of fit()")
self.num_classes = num_classes
self.quantile_levels = quantile_levels
self.feature_prune = False # will be set to True if feature-pruning is turned on.
self.low_memory = low_memory
self.bagged_mode = True if k_fold >= 2 else False
if self.bagged_mode:
self.k_fold = k_fold # int number of folds to do model bagging, < 2 means disabled
self.n_repeats = n_repeats
else:
self.k_fold = 0
self.n_repeats = 1
self.model_best = None
self.models = {} # Dict of model name -> model object. A key, value pair only exists if a model is persisted in memory. # TODO: v0.1 Rename and consider making private
self.model_graph = nx.DiGraph() # Directed Acyclic Graph (DAG) of model interactions. Describes how certain models depend on the predictions of certain other models. Contains numerous metadata regarding each model.
self.model_full_dict = {} # Dict of normal model -> FULL model. FULL models are produced by self.refit_single_full() and self.refit_ensemble_full().
self._model_full_dict_val_score = {} # Dict of FULL model -> normal model validation score in case the normal model had been deleted.
self.reset_paths = False
self._time_limit = None # Internal float of the total time limit allowed for a given fit call. Used in logging statements.
self._time_train_start = None # Internal timestamp of the time training started for a given fit call. Used in logging statements.
self._num_rows_train = None
self._num_cols_train = None
self.is_data_saved = False
self._X_saved = False
self._y_saved = False
self._X_val_saved = False
self._y_val_saved = False
self._groups = None # custom split indices
self._regress_preds_asprobas = False # whether to treat regression predictions as class-probabilities (during distillation)
self._extra_banned_names = set() # Names which are banned but are not used by a trained model.
# self._exceptions_list = [] # TODO: Keep exceptions list for debugging during benchmarking.
# path_root is the directory containing learner.pkl
@property
def path_root(self) -> str:
return self.path.rsplit(os.path.sep, maxsplit=2)[0] + os.path.sep
@property
def path_utils(self) -> str:
return self.path_root + 'utils' + os.path.sep
@property
def path_data(self) -> str:
return self.path_utils + 'data' + os.path.sep
def load_X(self):
if self._X_saved:
path = self.path_data + 'X.pkl'
return load_pkl.load(path=path)
return None
def load_X_val(self):
if self._X_val_saved:
path = self.path_data + 'X_val.pkl'
return load_pkl.load(path=path)
return None
def load_y(self):
if self._y_saved:
path = self.path_data + 'y.pkl'
return load_pkl.load(path=path)
return None
def load_y_val(self):
if self._y_val_saved:
path = self.path_data + 'y_val.pkl'
return load_pkl.load(path=path)
return None
def load_data(self):
X = self.load_X()
y = self.load_y()
X_val = self.load_X_val()
y_val = self.load_y_val()
return X, y, X_val, y_val
def save_X(self, X, verbose=True):
path = self.path_data + 'X.pkl'
save_pkl.save(path=path, object=X, verbose=verbose)
self._X_saved = True
def save_X_val(self, X, verbose=True):
path = self.path_data + 'X_val.pkl'
save_pkl.save(path=path, object=X, verbose=verbose)
self._X_val_saved = True
def save_y(self, y, verbose=True):
path = self.path_data + 'y.pkl'
save_pkl.save(path=path, object=y, verbose=verbose)
self._y_saved = True
def save_y_val(self, y, verbose=True):
path = self.path_data + 'y_val.pkl'
save_pkl.save(path=path, object=y, verbose=verbose)
self._y_val_saved = True
def get_model_names(self, stack_name: Union[List[str], str] = None, level: Union[List[int], int] = None, can_infer: bool = None, models: List[str] = None) -> List[str]:
if models is None:
models = list(self.model_graph.nodes)
if stack_name is not None:
if not isinstance(stack_name, list):
stack_name = [stack_name]
node_attributes: dict = self.get_models_attribute_dict(attribute='stack_name')
models = [model_name for model_name in models if node_attributes[model_name] in stack_name]
if level is not None:
if not isinstance(level, list):
level = [level]
node_attributes: dict = self.get_models_attribute_dict(attribute='level')
models = [model_name for model_name in models if node_attributes[model_name] in level]
# TODO: can_infer is technically more complicated, if an ancestor can't infer then the model can't infer.
if can_infer is not None:
node_attributes = self.get_models_attribute_dict(attribute='can_infer')
models = [model for model in models if node_attributes[model] == can_infer]
return models
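# Usage sketch (assumes `trainer` is a fitted instance of this class; stack names follow the
# 'core'/'aux1' convention used elsewhere in this file):
#   trainer.get_model_names()                         # every registered model
#   trainer.get_model_names(stack_name='core')        # only core models (no weighted ensembles)
#   trainer.get_model_names(level=1, can_infer=True)  # L1 models that can currently infer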
def get_max_level(self, stack_name: str = None, models: List[str] = None) -> int:
models = self.get_model_names(stack_name=stack_name, models=models)
models_attribute_dict = self.get_models_attribute_dict(attribute='level', models=models)
if models_attribute_dict:
return max(list(models_attribute_dict.values()))
else:
return -1
def construct_model_templates(self, hyperparameters: dict, **kwargs) -> Tuple[List[AbstractModel], dict]:
"""Constructs a list of unfit models based on the hyperparameters dict."""
raise NotImplementedError
def construct_model_templates_distillation(self, hyperparameters: dict, **kwargs) -> Tuple[List[AbstractModel], dict]:
"""Constructs a list of unfit models based on the hyperparameters dict for softclass distillation."""
raise NotImplementedError
def get_model_level(self, model_name: str) -> int:
return self.get_model_attribute(model=model_name, attribute='level')
def set_contexts(self, path_context):
self.path, model_paths = self.create_contexts(path_context)
for model, path in model_paths.items():
self.set_model_attribute(model=model, attribute='path', val=path)
def create_contexts(self, path_context: str) -> Tuple[str, dict]:
path = path_context
model_paths = self.get_models_attribute_dict(attribute='path')
for model, prev_path in model_paths.items():
model_local_path = prev_path.split(self.path, 1)[1]
new_path = path + model_local_path
model_paths[model] = new_path
return path, model_paths
def fit(self, X, y, hyperparameters: dict, X_val=None, y_val=None, **kwargs):
raise NotImplementedError
# TODO: Enable easier re-mapping of trained models -> hyperparameters input (They don't share a key since name can change)
def train_multi_levels(self, X, y, hyperparameters: dict, X_val=None, y_val=None, X_unlabeled=None, base_model_names: List[str] = None,
core_kwargs: dict = None, aux_kwargs: dict = None, level_start=1, level_end=1, time_limit=None, name_suffix: str = None,
relative_stack=True, level_time_modifier=0.333) -> List[str]:
"""
Trains a multi-layer stack ensemble using the input data on the hyperparameters dict input.
hyperparameters is used to determine the models used in each stack layer.
If continuing a stack ensemble with level_start>1, ensure that base_model_names is set to the appropriate base models that will be used by the level_start level models.
Trains both core and aux models.
core models are standard models which are fit on the data features. Core models will also use model predictions if base_model_names was specified or if level != 1.
aux models are ensemble models which only use the predictions of core models as features. These models never use the original features.
level_time_modifier : float, default 0.333
The amount of extra time given relatively to early stack levels compared to later stack levels.
If 0, then all stack levels are given 100%/L of the time, where L is the number of stack levels.
If 1, then all stack levels are given 100% of the time, meaning if the first level uses all of the time given to it, the other levels won't train.
Time given to a level = remaining_time / remaining_levels * (1 + level_time_modifier), capped by total remaining time.
Returns a list of the model names that were trained from this method call, in order of fit.
"""
self._time_limit = time_limit
self._time_train_start = time.time()
time_train_start = self._time_train_start
hyperparameters = self._process_hyperparameters(hyperparameters=hyperparameters)
if relative_stack:
if level_start != 1:
raise AssertionError(f'level_start must be 1 when `relative_stack=True`. (level_start = {level_start})')
level_add = 0
if base_model_names:
max_base_model_level = self.get_max_level(models=base_model_names)
level_start = max_base_model_level + 1
level_add = level_start - 1
level_end += level_add
if level_start != 1:
hyperparameters_relative = {}
for key in hyperparameters:
if isinstance(key, int):
hyperparameters_relative[key+level_add] = hyperparameters[key]
else:
hyperparameters_relative[key] = hyperparameters[key]
hyperparameters = hyperparameters_relative
core_kwargs = {} if core_kwargs is None else core_kwargs.copy()
aux_kwargs = {} if aux_kwargs is None else aux_kwargs.copy()
model_names_fit = []
if level_start != level_end:
logger.log(20, f'AutoGluon will fit {level_end - level_start + 1} stack levels (L{level_start} to L{level_end}) ...')
for level in range(level_start, level_end + 1):
core_kwargs_level = core_kwargs.copy()
aux_kwargs_level = aux_kwargs.copy()
if time_limit is not None:
time_train_level_start = time.time()
levels_left = level_end - level + 1
time_left = time_limit - (time_train_level_start - time_train_start)
time_limit_for_level = min(time_left / levels_left * (1 + level_time_modifier), time_left)
time_limit_core = time_limit_for_level
time_limit_aux = max(time_limit_for_level * 0.1, min(time_limit, 360)) # Allows aux to go over time_limit, but only by a small amount
core_kwargs_level['time_limit'] = core_kwargs_level.get('time_limit', time_limit_core)
aux_kwargs_level['time_limit'] = aux_kwargs_level.get('time_limit', time_limit_aux)
base_model_names, aux_models = self.stack_new_level(
X=X, y=y, X_val=X_val, y_val=y_val, X_unlabeled=X_unlabeled,
models=hyperparameters, level=level, base_model_names=base_model_names,
core_kwargs=core_kwargs_level, aux_kwargs=aux_kwargs_level, name_suffix=name_suffix,
)
model_names_fit += base_model_names + aux_models
self._time_limit = None
self.save()
return model_names_fit
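# Usage sketch for the method above (hyperparameter keys and values are hypothetical shorthand and
# would normally be produced by the learner; assumes `trainer` was constructed with bagging enabled):
#   fit_models = trainer.train_multi_levels(
#       X, y,
#       hyperparameters={'GBM': {}, 'RF': {}},  # same candidate models at every stack level
#       level_start=1, level_end=2, time_limit=3600,
#   )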
def stack_new_level(self, X, y, models: Union[List[AbstractModel], dict], X_val=None, y_val=None, X_unlabeled=None, level=1, base_model_names: List[str] = None,
core_kwargs: dict = None, aux_kwargs: dict = None, name_suffix: str = None) -> Tuple[List[str], List[str]]:
"""
Similar to calling self.stack_new_level_core, except auxiliary models will also be trained via a call to self.stack_new_level_aux, with the models trained from self.stack_new_level_core used as base models.
"""
if base_model_names is None:
base_model_names = []
if level < 1:
raise AssertionError(f'Stack level must be >= 1, but level={level}.')
elif not base_model_names and level > 1:
logger.log(30, f'Warning: Training models at stack level {level}, but no base models were specified.')
elif base_model_names and level == 1:
raise AssertionError(f'Stack level 1 models cannot have base models, but base_model_names={base_model_names}.')
core_kwargs = {} if core_kwargs is None else core_kwargs.copy()
aux_kwargs = {} if aux_kwargs is None else aux_kwargs.copy()
if name_suffix:
core_kwargs['name_suffix'] = core_kwargs.get('name_suffix', '') + name_suffix
aux_kwargs['name_suffix'] = aux_kwargs.get('name_suffix', '') + name_suffix
core_models = self.stack_new_level_core(X=X, y=y, X_val=X_val, y_val=y_val, X_unlabeled=X_unlabeled, models=models,
level=level, base_model_names=base_model_names, **core_kwargs)
if X_val is None:
aux_models = self.stack_new_level_aux(X=X, y=y, base_model_names=core_models, level=level+1, **aux_kwargs)
else:
aux_models = self.stack_new_level_aux(X=X_val, y=y_val, fit=False, base_model_names=core_models, level=level+1, **aux_kwargs)
return core_models, aux_models
def stack_new_level_core(self, X, y, models: Union[List[AbstractModel], dict], X_val=None, y_val=None, X_unlabeled=None,
level=1, base_model_names: List[str] = None, stack_name='core',
ag_args=None, ag_args_fit=None, ag_args_ensemble=None, excluded_model_types=None, ensemble_type=StackerEnsembleModel,
name_suffix: str = None, get_models_func=None, refit_full=False, **kwargs) -> List[str]:
"""
Trains all models using the data provided.
If level > 1, then the models will use base model predictions as additional features.
The base models used can be specified via base_model_names.
If self.bagged_mode, then models will be trained as StackerEnsembleModels.
The data provided in this method should not contain stack features, as they will be automatically generated if necessary.
"""
if get_models_func is None:
get_models_func = self.construct_model_templates
if base_model_names is None:
base_model_names = []
if not self.bagged_mode and level != 1:
raise ValueError('Stack Ensembling is not valid for non-bagged mode.')
if isinstance(models, dict):
get_models_kwargs = dict(
level=level,
name_suffix=name_suffix,
ag_args=ag_args,
ag_args_fit=ag_args_fit,
excluded_model_types=excluded_model_types,
)
if self.bagged_mode:
if level == 1:
(base_model_names, base_model_paths, base_model_types) = (None, None, None)
elif level > 1:
base_model_names, base_model_paths, base_model_types = self._get_models_load_info(model_names=base_model_names)
if len(base_model_names) == 0:
logger.log(20, 'No base models to train on, skipping stack level...')
return []
else:
raise AssertionError(f'Stack level cannot be less than 1! level = {level}')
ensemble_kwargs = {
'base_model_names': base_model_names,
'base_model_paths_dict': base_model_paths,
'base_model_types_dict': base_model_types,
'random_state': level + self.random_state,
}
get_models_kwargs.update(dict(
ag_args_ensemble=ag_args_ensemble,
ensemble_type=ensemble_type,
ensemble_kwargs=ensemble_kwargs,
))
models, model_args_fit = get_models_func(hyperparameters=models, **get_models_kwargs)
if model_args_fit:
hyperparameter_tune_kwargs = {
model_name: model_args_fit[model_name]['hyperparameter_tune_kwargs']
for model_name in model_args_fit if 'hyperparameter_tune_kwargs' in model_args_fit[model_name]
}
kwargs['hyperparameter_tune_kwargs'] = hyperparameter_tune_kwargs
logger.log(20, f'Fitting {len(models)} L{level} models ...')
X_init = self.get_inputs_to_stacker(X, base_models=base_model_names, fit=True)
if X_val is not None:
X_val = self.get_inputs_to_stacker(X_val, base_models=base_model_names, fit=False)
if refit_full and X_val is not None:
X_init = pd.concat([X_init, X_val])
y = pd.concat([y, y_val])
X_val = None
y_val = None
if X_unlabeled is not None:
X_unlabeled = self.get_inputs_to_stacker(X_unlabeled, base_models=base_model_names, fit=False)
fit_kwargs = dict(num_classes=self.num_classes)
# FIXME: TODO: v0.1 X_unlabeled isn't cached so it won't be available during refit_full or fit_extra.
return self._train_multi(X=X_init, y=y, X_val=X_val, y_val=y_val, X_unlabeled=X_unlabeled,
models=models, level=level, stack_name=stack_name, fit_kwargs=fit_kwargs, **kwargs)
# TODO: Consider making level be auto-determined based off of max(base_model_levels)+1
# TODO: Remove name_suffix, hacked in
# TODO: X can be optional because it isn't needed if fit=True
def stack_new_level_aux(self, X, y, base_model_names: List[str], level, fit=True, stack_name='aux1', time_limit=None, name_suffix: str = None, get_models_func=None, check_if_best=True) -> List[str]:
"""
Trains auxiliary models (currently a single weighted ensemble) using the provided base models.
Level must be greater than the level of any of the base models.
Auxiliary models never use the original features and only train with the predictions of other models as features.
"""
X_stack_preds = self.get_inputs_to_stacker(X, base_models=base_model_names, fit=fit, use_orig_features=False)
if self.weight_evaluation:
X, w = extract_column(X, self.sample_weight) # TODO: consider redesign with w as separate arg instead of bundled inside X
if w is not None:
X_stack_preds[self.sample_weight] = w.values/w.mean()
return self.generate_weighted_ensemble(X=X_stack_preds, y=y, level=level, base_model_names=base_model_names, k_fold=1, n_repeats=1, stack_name=stack_name, time_limit=time_limit, name_suffix=name_suffix, get_models_func=get_models_func, check_if_best=check_if_best)
def predict(self, X, model=None):
if model is None:
model = self._get_best()
return self._predict_model(X, model)
def predict_proba(self, X, model=None):
if model is None:
model = self._get_best()
return self._predict_proba_model(X, model)
def _get_best(self):
if self.model_best is not None:
return self.model_best
else:
return self.get_model_best()
# Note: model_pred_proba_dict is mutated in this function to minimize memory usage
def get_inputs_to_model(self, model, X, model_pred_proba_dict=None, fit=False, preprocess_nonadaptive=False):
"""
For output X:
If preprocess_nonadaptive=False, call model.predict(X)
If preprocess_nonadaptive=True, call model.predict(X, preprocess_nonadaptive=False)
"""
if isinstance(model, str):
# TODO: Remove unnecessary load when no stacking
model = self.load_model(model)
model_level = self.get_model_level(model.name)
if model_level > 1 and isinstance(model, StackerEnsembleModel):
if fit:
model_pred_proba_dict = None
else:
model_set = self.get_minimum_model_set(model)
model_set = [m for m in model_set if m != model.name] # TODO: Can probably be faster, get this result from graph
model_pred_proba_dict = self.get_model_pred_proba_dict(X=X, models=model_set, model_pred_proba_dict=model_pred_proba_dict, fit=fit)
X = model.preprocess(X=X, preprocess_nonadaptive=preprocess_nonadaptive, fit=fit, model_pred_proba_dict=model_pred_proba_dict)
elif preprocess_nonadaptive:
X = model.preprocess(X=X, preprocess_stateful=False)
return X
def score(self, X, y, model=None, weights=None) -> float:
if self.eval_metric.needs_pred or self.eval_metric.needs_quantile:
y_pred = self.predict(X=X, model=model)
else:
y_pred = self.predict_proba(X=X, model=model)
return compute_weighted_metric(y, y_pred, self.eval_metric, weights, weight_evaluation=self.weight_evaluation,
quantile_levels=self.quantile_levels)
def score_with_y_pred_proba(self, y, y_pred_proba, weights=None) -> float:
if self.eval_metric.needs_pred or self.eval_metric.needs_quantile:
y_pred = get_pred_from_proba(y_pred_proba=y_pred_proba, problem_type=self.problem_type)
else:
y_pred = y_pred_proba
return compute_weighted_metric(y, y_pred, self.eval_metric, weights, weight_evaluation=self.weight_evaluation,
quantile_levels=self.quantile_levels)
# TODO: Consider adding persist to disk functionality for pred_proba dictionary to lessen memory burden on large multiclass problems.
# For datasets with 100+ classes, this function could potentially run the system OOM due to each pred_proba numpy array taking significant amounts of space.
# This issue already existed in the previous level-based version but only had the minimum required predictions in memory at a time, whereas this has all model predictions in memory.
# TODO: Add memory optimal topological ordering -> Minimize amount of pred_probas in memory at a time, delete pred probas that are no longer required
# Optimally computes pred_probas for each model in `models`. Will compute each necessary model only once and store its predictions in a dictionary.
# Note: Mutates model_pred_proba_dict and model_pred_time_dict input if present to minimize memory usage
# fit = get oof pred proba
# if record_pred_time is `True`, outputs tuple of dicts (model_pred_proba_dict, model_pred_time_dict), else output only model_pred_proba_dict
def get_model_pred_proba_dict(self, X, models, model_pred_proba_dict=None, model_pred_time_dict=None, fit=False, record_pred_time=False):
if model_pred_proba_dict is None:
model_pred_proba_dict = {}
if model_pred_time_dict is None:
model_pred_time_dict = {}
if fit:
model_pred_order = [model for model in models if model not in model_pred_proba_dict.keys()]
else:
model_set = set()
for model in models:
if model in model_set:
continue
min_model_set = set(self.get_minimum_model_set(model))
model_set = model_set.union(min_model_set)
model_set = model_set.difference(set(model_pred_proba_dict.keys()))
models_to_load = list(model_set)
subgraph = nx.subgraph(self.model_graph, models_to_load)
# Prune models whose outputs are only needed by predictions already cached in model_pred_proba_dict: drop any node that is not in `models` and has no remaining successors in the subgraph, then cascade the same rule to its predecessors.
models_to_ignore = [model for model in models_to_load if (model not in models) and (not list(subgraph.successors(model)))]
while models_to_ignore:
model = models_to_ignore[0]
predecessors = list(subgraph.predecessors(model))
subgraph.remove_node(model)
models_to_ignore = models_to_ignore[1:]
for predecessor in predecessors:
if (predecessor not in models) and (not list(subgraph.successors(predecessor))) and (predecessor not in models_to_ignore):
models_to_ignore.append(predecessor)
# Get model prediction order
model_pred_order = list(nx.lexicographical_topological_sort(subgraph))
# Compute model predictions in topological order
for model_name in model_pred_order:
if record_pred_time:
time_start = time.time()
if fit:
model_type = self.get_model_attribute(model=model_name, attribute='type')
if issubclass(model_type, BaggedEnsembleModel):
model_path = self.get_model_attribute(model=model_name, attribute='path')
model_pred_proba_dict[model_name] = model_type.load_oof(path=model_path)
else:
raise AssertionError(f'Model {model_name} must be a BaggedEnsembleModel to return oof_pred_proba')
else:
model = self.load_model(model_name=model_name)
if isinstance(model, StackerEnsembleModel):
preprocess_kwargs = dict(infer=False, model_pred_proba_dict=model_pred_proba_dict)
model_pred_proba_dict[model_name] = model.predict_proba(X, **preprocess_kwargs)
else:
model_pred_proba_dict[model_name] = model.predict_proba(X)
if record_pred_time:
time_end = time.time()
model_pred_time_dict[model_name] = time_end - time_start
if record_pred_time:
return model_pred_proba_dict, model_pred_time_dict
else:
return model_pred_proba_dict
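# Minimal standalone sketch of the dependency resolution used above (illustrative; the real code
# additionally skips models whose predictions are already cached in model_pred_proba_dict):
#   import networkx as nx
#   g = nx.DiGraph()
#   g.add_edges_from([('LightGBM_L1', 'WeightedEnsemble_L2'),
#                     ('CatBoost_L1', 'WeightedEnsemble_L2')])   # edge = "is a base model of"
#   requested = ['WeightedEnsemble_L2']
#   needed = set(requested)
#   for m in requested:
#       needed |= nx.ancestors(g, m)                  # pull in every required base model
#   order = list(nx.lexicographical_topological_sort(g.subgraph(needed)))
#   # `order` places both L1 models before 'WeightedEnsemble_L2', so each model's inputs exist in time.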
# TODO: Remove _get_inputs_to_stacker_legacy eventually, move logic internally into this function instead
def get_inputs_to_stacker(self, X, base_models, model_pred_proba_dict=None, fit=False, use_orig_features=True):
if base_models is None:
base_models = []
if not fit:
model_pred_proba_dict = self.get_model_pred_proba_dict(X=X, models=base_models, model_pred_proba_dict=model_pred_proba_dict)
model_pred_proba_list = [model_pred_proba_dict[model] for model in base_models]
else:
# TODO: After _get_inputs_to_stacker_legacy is removed, this if/else is not necessary, instead pass fit param to get_model_pred_proba_dict()
model_pred_proba_list = None
X_stacker_input = self._get_inputs_to_stacker_legacy(X=X, level_start=1, level_end=2, model_levels={1: base_models}, y_pred_probas=model_pred_proba_list, fit=fit)
if not use_orig_features:
X_stacker_input = X_stacker_input.drop(columns=X.columns)
return X_stacker_input
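# Conceptual sketch of the stacker input layout produced above (pandas only; column names are
# hypothetical): base model prediction columns are prepended to, or replace, the original features.
#   import pandas as pd
#   X = pd.DataFrame({'f1': [0.2, 0.7]})
#   pred_proba = {'LightGBM_L1': [0.1, 0.9], 'CatBoost_L1': [0.2, 0.8]}        # binary P(class 1)
#   X_stack = pd.concat([pd.DataFrame(pred_proba, index=X.index), X], axis=1)  # use_orig_features=True
#   X_aux = X_stack.drop(columns=X.columns)                                    # use_orig_features=False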
# TODO: Legacy code, still used during training because it is technically slightly faster and more memory efficient than get_model_pred_proba_dict()
# Remove in future as it limits flexibility in stacker inputs during training
def _get_inputs_to_stacker_legacy(self, X, level_start, level_end, model_levels, y_pred_probas=None, fit=False):
if level_start > level_end:
raise AssertionError(f'level_start cannot be greater than level end: ({level_start}, {level_end})')
if (level_start == 1) and (level_end == 1):
return X
if fit:
if level_start > 1:
dummy_stacker_start = self._get_dummy_stacker(level=level_start, model_levels=model_levels, use_orig_features=True)
cols_to_drop = dummy_stacker_start.stack_columns
X = X.drop(cols_to_drop, axis=1)
dummy_stacker = self._get_dummy_stacker(level=level_end, model_levels=model_levels, use_orig_features=True)
X = dummy_stacker.preprocess(X=X, preprocess_nonadaptive=False, fit=True, compute_base_preds=True)
elif y_pred_probas is not None:
if not y_pred_probas:
return X
dummy_stacker = self._get_dummy_stacker(level=level_end, model_levels=model_levels, use_orig_features=True)
X_stacker = dummy_stacker.pred_probas_to_df(pred_proba=y_pred_probas, index=X.index)
if dummy_stacker.params['use_orig_features']:
if level_start > 1:
dummy_stacker_start = self._get_dummy_stacker(level=level_start, model_levels=model_levels, use_orig_features=True)
cols_to_drop = dummy_stacker_start.stack_columns
X = X.drop(cols_to_drop, axis=1)
X = pd.concat([X_stacker, X], axis=1)
else:
X = X_stacker
else:
dummy_stackers = {}
for level in range(level_start, level_end+1):
if level > 1:
dummy_stackers[level] = self._get_dummy_stacker(level=level, model_levels=model_levels, use_orig_features=True)
for level in range(level_start, level_end):
if level > 1:
cols_to_drop = dummy_stackers[level].stack_columns
else:
cols_to_drop = []
X = dummy_stackers[level+1].preprocess(X=X, preprocess_nonadaptive=False, fit=False, compute_base_preds=True)
if len(cols_to_drop) > 0:
X = X.drop(cols_to_drop, axis=1)
return X
# You must have previously called fit() with cache_data=True
# Fits _FULL versions of specified models, but does NOT link them (_FULL stackers will still use normal models as input)
def refit_single_full(self, X=None, y=None, X_val=None, y_val=None, X_unlabeled=None, models=None) -> List[str]:
if X is None:
X = self.load_X()
if X_val is None:
X_val = self.load_X_val()
if y is None:
y = self.load_y()
if y_val is None:
y_val = self.load_y_val()
if models is None:
models = self.get_model_names()
model_levels = dict()
ignore_models = []
ignore_stack_names = [REFIT_FULL_NAME]
for stack_name in ignore_stack_names:
ignore_models += self.get_model_names(stack_name=stack_name) # get_model_names returns [] if stack_name does not exist
models = [model for model in models if model not in ignore_models]
for model in models:
model_level = self.get_model_level(model)
if model_level not in model_levels:
model_levels[model_level] = []
model_levels[model_level].append(model)
levels = sorted(model_levels.keys())
models_trained_full = []
model_full_dict = {}
for level in levels:
models_level = model_levels[level]
for model in models_level:
model = self.load_model(model)
model_name = model.name
model_full = model.convert_to_refit_full_template()
# Mitigates situation where bagged models barely had enough memory and refit requires more. Worst case results in OOM, but this lowers chance of failure.
model_full._user_params_aux['max_memory_usage_ratio'] = model.params_aux['max_memory_usage_ratio'] * 1.15
# TODO: Do it for all models in the level at once to avoid repeated processing of data?
base_model_names = self.get_base_model_names(model_name)
stacker_type = type(model)
if issubclass(stacker_type, WeightedEnsembleModel):
# TODO: Technically we don't need to re-train the weighted ensemble, we could just copy the original and re-use the weights.
w = None
if X_val is None:
if self.weight_evaluation:
X, w = extract_column(X, self.sample_weight)
X_stack_preds = self.get_inputs_to_stacker(X, base_models=base_model_names, fit=True, use_orig_features=False)
y_input = y
else:
if self.weight_evaluation:
X_val, w = extract_column(X_val, self.sample_weight)
X_stack_preds = self.get_inputs_to_stacker(X_val, base_models=base_model_names, fit=False, use_orig_features=False) # TODO: May want to cache this during original fit, as we do with OOF preds
y_input = y_val
if w is not None:
X_stack_preds[self.sample_weight] = w.values/w.mean()
orig_weights = model._get_model_weights()
base_model_names = list(orig_weights.keys())
weights = list(orig_weights.values())
child_hyperparameters = {
AG_ARGS: {'model_type': 'SIMPLE_ENS_WEIGHTED'},
'weights': weights,
}
# TODO: stack_name=REFIT_FULL_NAME_AUX?
models_trained = self.generate_weighted_ensemble(X=X_stack_preds, y=y_input, level=level, stack_name=REFIT_FULL_NAME, k_fold=1, n_repeats=1,
base_model_names=base_model_names, name_suffix=REFIT_FULL_SUFFIX, save_bag_folds=True,
check_if_best=False, child_hyperparameters=child_hyperparameters)
# TODO: Do the below more elegantly, ideally as a parameter to the trainer train function to disable recording scores/pred time.
for model_weighted_ensemble in models_trained:
model_loaded = self.load_model(model_weighted_ensemble)
model_loaded.val_score = None
model_loaded.predict_time = None
self.set_model_attribute(model=model_weighted_ensemble, attribute='val_score', val=None)
self.save_model(model_loaded)
else:
models_trained = self.stack_new_level_core(X=X, y=y, X_val=X_val, y_val=y_val, X_unlabeled=X_unlabeled, models=[model_full], base_model_names=base_model_names, level=level, stack_name=REFIT_FULL_NAME,
hyperparameter_tune_kwargs=None, feature_prune=False, k_fold=0, n_repeats=1, ensemble_type=stacker_type, refit_full=True)
if len(models_trained) == 1:
model_full_dict[model_name] = models_trained[0]
for model_trained in models_trained:
self._model_full_dict_val_score[model_trained] = self.get_model_attribute(model_name, 'val_score')
models_trained_full += models_trained
keys_to_del = []
for model in model_full_dict.keys():
if model_full_dict[model] not in models_trained_full:
keys_to_del.append(model)
for key in keys_to_del:
del model_full_dict[key]
self.model_full_dict.update(model_full_dict)
self.save() # TODO: This could be more efficient by passing in arg to not save if called by refit_ensemble_full since it saves anyways later.
return models_trained_full
# Fits _FULL models and links them in the stack so _FULL models only use other _FULL models as input during stacking
# If model is specified, will fit all _FULL models that are ancestors of the provided model, automatically linking them.
# If no model is specified, all models are refit and linked appropriately.
def refit_ensemble_full(self, model='all') -> dict:
if model == 'all':
ensemble_set = self.get_model_names()
else:
if model == 'best':
model = self.get_model_best()
ensemble_set = self.get_minimum_model_set(model)
existing_models = self.get_model_names()
ensemble_set_valid = []
for model in ensemble_set:
if model in self.model_full_dict and self.model_full_dict[model] in existing_models:
logger.log(20, f"Model '{model}' already has a refit _FULL model: '{self.model_full_dict[model]}', skipping refit...")
else:
ensemble_set_valid.append(model)
if ensemble_set_valid:
models_trained_full = self.refit_single_full(models=ensemble_set_valid)
else:
models_trained_full = []
for model_full in models_trained_full:
# TODO: Consider moving base model info to a separate pkl file so that it can be edited without having to load/save the model again
# Downside: Slower inference speed when models are not persisted in memory prior.
model_loaded = self.load_model(model_full)
if isinstance(model_loaded, StackerEnsembleModel):
for stack_column_prefix in model_loaded.stack_column_prefix_lst:
base_model = model_loaded.stack_column_prefix_to_model_map[stack_column_prefix]
new_base_model = self.model_full_dict[base_model]
new_base_model_type = self.get_model_attribute(model=new_base_model, attribute='type')
new_base_model_path = self.get_model_attribute(model=new_base_model, attribute='path')
model_loaded.base_model_paths_dict[new_base_model] = new_base_model_path
model_loaded.base_model_types_dict[new_base_model] = new_base_model_type
model_loaded.base_model_names.append(new_base_model)
model_loaded.stack_column_prefix_to_model_map[stack_column_prefix] = new_base_model
model_loaded.save() # TODO: Avoid this!
# Remove old edges and add new edges
edges_to_remove = list(self.model_graph.in_edges(model_loaded.name))
self.model_graph.remove_edges_from(edges_to_remove)
if isinstance(model_loaded, StackerEnsembleModel):
for stack_column_prefix in model_loaded.stack_column_prefix_lst:
base_model_name = model_loaded.stack_column_prefix_to_model_map[stack_column_prefix]
self.model_graph.add_edge(base_model_name, model_loaded.name)
self.save()
return copy.deepcopy(self.model_full_dict)
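# Usage sketch (assumes a bagged `trainer` that was originally fit with cache_data=True; model names
# are illustrative):
#   full_map = trainer.refit_ensemble_full(model='best')  # refit and relink ancestors of the best model
#   # full_map maps original names to their _FULL counterparts, e.g. {'LightGBM_BAG_L1': 'LightGBM_BAG_L1_FULL'}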
# TODO: Take the best-performing model with the lowest inference time
def get_model_best(self, can_infer=None, allow_full=True):
models = self.get_model_names(can_infer=can_infer)
if not models:
raise AssertionError('Trainer has no fit models that can infer.')
model_performances = self.get_models_attribute_dict(attribute='val_score')
perfs = [(m, model_performances[m]) for m in models if model_performances[m] is not None]
if not perfs:
model_full_dict_inverse = {full: orig for orig, full in self.model_full_dict.items()}
models = [m for m in models if m in model_full_dict_inverse]
perfs = [(m, self._get_full_model_val_score(m)) for m in models]
if not perfs:
raise AssertionError('No fit models that can infer exist with a validation score to choose the best model.')
elif not allow_full:
raise AssertionError('No fit models that can infer exist with a validation score to choose the best model, but refit_full models exist. Set `allow_full=True` to get the best refit_full model.')
return max(perfs, key=lambda i: i[1])[0]
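# Usage sketch (assumes a fitted `trainer` and some held-out frame `X_test`):
#   best = trainer.get_model_best()               # highest validation score among inferable models
#   y_pred = trainer.predict(X_test, model=best)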
def save_model(self, model, reduce_memory=True):
# TODO: In future perhaps give option for the reduce_memory_size arguments, perhaps trainer level variables specified by user?
if reduce_memory:
model.reduce_memory_size(remove_fit=True, remove_info=False, requires_save=True)
if self.low_memory:
model.save()
else:
self.models[model.name] = model
def save(self):
models = self.models
if self.low_memory:
self.models = {}
save_pkl.save(path=self.path + self.trainer_file_name, object=self)
if self.low_memory:
self.models = models
def persist_models(self, model_names='all', with_ancestors=False, max_memory=None) -> List[str]:
if model_names == 'all':
model_names = self.get_model_names()
elif model_names == 'best':
if self.model_best is not None:
model_names = [self.model_best]
else:
model_names = [self.get_model_best(can_infer=True)]
if not isinstance(model_names, list):
raise ValueError(f'model_names must be a list of model names. Invalid value: {model_names}')
if with_ancestors:
model_names = self.get_minimum_models_set(model_names)
model_names_already_persisted = [model_name for model_name in model_names if model_name in self.models]
if model_names_already_persisted:
logger.log(30, f'The following {len(model_names_already_persisted)} models were already persisted and will be ignored in the model loading process: {model_names_already_persisted}')
model_names = [model_name for model_name in model_names if model_name not in model_names_already_persisted]
if not model_names:
logger.log(30, f'No valid unpersisted models were specified to be persisted, so no change in model persistence was performed.')
return []
if max_memory is not None:
info = self.get_models_info(model_names)
model_mem_size_map = {model: info[model]['memory_size'] for model in model_names}
for model in model_mem_size_map:
if 'children_info' in info[model]:
for child in info[model]['children_info'].values():
model_mem_size_map[model] += child['memory_size']
total_mem_required = sum(model_mem_size_map.values())
available_mem = psutil.virtual_memory().available
memory_proportion = total_mem_required / available_mem
if memory_proportion > max_memory:
logger.log(30, f'Models will not be persisted in memory as they are expected to require {round(memory_proportion * 100, 2)}% of memory, which is greater than the specified max_memory limit of {round(max_memory*100, 2)}%.')
logger.log(30, f'\tModels will be loaded on-demand from disk to maintain safe memory usage, increasing inference latency. If inference latency is a concern, try to use smaller models or increase the value of max_memory.')
return []
else:
logger.log(20, f'Persisting {len(model_names)} models in memory. Models will require {round(memory_proportion*100, 2)}% of memory.')
models = []
for model_name in model_names:
model = self.load_model(model_name)
self.models[model.name] = model
models.append(model)
for model in models:
# TODO: Move this to model code
if isinstance(model, BaggedEnsembleModel):
for fold, fold_model in enumerate(model.models):
if isinstance(fold_model, str):
model.models[fold] = model.load_child(fold_model)
return model_names
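# Usage sketch (assumes a fitted `trainer`; the model name is hypothetical):
#   trainer.persist_models('best', with_ancestors=True)       # keep the best model and its base models in memory
#   trainer.persist_models(['LightGBM_L1'], max_memory=0.2)   # skip persisting if >20% of available RAM is needed
#   trainer.unpersist_models()                                 # release all persisted models again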
# TODO: Rename the model_name parameter to model
def load_model(self, model_name: str, path: str = None, model_type=None) -> AbstractModel:
if isinstance(model_name, AbstractModel):
return model_name
if model_name in self.models.keys():
return self.models[model_name]
else:
if path is None:
path = self.get_model_attribute(model=model_name, attribute='path')
if model_type is None:
model_type = self.get_model_attribute(model=model_name, attribute='type')
return model_type.load(path=path, reset_paths=self.reset_paths)
def unpersist_models(self, model_names='all') -> list:
if model_names == 'all':
model_names = list(self.models.keys())
if not isinstance(model_names, list):
raise ValueError(f'model_names must be a list of model names. Invalid value: {model_names}')
unpersisted_models = []
for model in model_names:
if model in self.models:
self.models.pop(model)
unpersisted_models.append(model)
if unpersisted_models:
logger.log(20, f'Unpersisted {len(unpersisted_models)} models: {unpersisted_models}')
else:
logger.log(30, f'No valid persisted models were specified to be unpersisted, so no change in model persistence was performed.')
return unpersisted_models
def generate_weighted_ensemble(self, X, y, level, base_model_names, k_fold=1, n_repeats=1, stack_name=None, hyperparameters=None,
time_limit=None, name_suffix: str = None, save_bag_folds=None, check_if_best=True, child_hyperparameters=None,
get_models_func=None) -> List[str]:
if get_models_func is None:
get_models_func = self.construct_model_templates
if len(base_model_names) == 0:
logger.log(20, 'No base models to train on, skipping weighted ensemble...')
return []
if child_hyperparameters is None:
child_hyperparameters = {}
if save_bag_folds is None:
can_infer_dict = self.get_models_attribute_dict('can_infer', models=base_model_names)
if False in can_infer_dict.values():
save_bag_folds = False
else:
save_bag_folds = True
weighted_ensemble_model, _ = get_models_func(
hyperparameters={
'default': {
'ENS_WEIGHTED': [child_hyperparameters],
}
},
ensemble_type=WeightedEnsembleModel,
ensemble_kwargs=dict(
base_model_names=base_model_names,
base_model_paths_dict=self.get_models_attribute_dict(attribute='path', models=base_model_names),
base_model_types_dict=self.get_models_attribute_dict(attribute='type', models=base_model_names),
base_model_types_inner_dict=self.get_models_attribute_dict(attribute='type_inner', models=base_model_names),
base_model_performances_dict=self.get_models_attribute_dict(attribute='val_score', models=base_model_names),
hyperparameters=hyperparameters,
random_state=level + self.random_state,
),
ag_args={'name_bag_suffix': ''},
ag_args_ensemble={'save_bag_folds': save_bag_folds},
name_suffix=name_suffix,
level=level,
)
weighted_ensemble_model = weighted_ensemble_model[0]
w = None
if self.weight_evaluation:
X, w = extract_column(X, self.sample_weight)
models = self._train_multi(
X=X,
y=y,
X_val=None,
y_val=None,
models=[weighted_ensemble_model],
k_fold=k_fold,
n_repeats=n_repeats,
hyperparameter_tune_kwargs=None,
stack_name=stack_name,
level=level,
time_limit=time_limit,
ens_sample_weight=w,
fit_kwargs=dict(num_classes=self.num_classes, groups=None), # FIXME: Is this the right way to do this?
)
for weighted_ensemble_model_name in models:
if check_if_best and weighted_ensemble_model_name in self.get_model_names():
if self.model_best is None:
self.model_best = weighted_ensemble_model_name
else:
best_score = self.get_model_attribute(self.model_best, 'val_score')
cur_score = self.get_model_attribute(weighted_ensemble_model_name, 'val_score')
if cur_score > best_score:
# new best model
self.model_best = weighted_ensemble_model_name
return models
def _train_single(self, X, y, model: AbstractModel, X_val=None, y_val=None, **model_fit_kwargs) -> AbstractModel:
"""
Trains model but does not add the trained model to this Trainer.
Returns trained model object.
"""
model = model.fit(X=X, y=y, X_val=X_val, y_val=y_val, **model_fit_kwargs)
return model
def _train_and_save(self, X, y, model: AbstractModel, X_val=None, y_val=None, stack_name='core', level=1, **model_fit_kwargs) -> List[str]:
"""
Trains model and saves it to disk, returning a list with a single element: The name of the model, or no elements if training failed.
If the model name is returned:
The model can be accessed via self.load_model(model.name).
The model will have metadata information stored in self.model_graph.
The model will be registered in self.model_graph under the given stack_name and level.
The model will be accessible and usable through any Trainer function that takes as input 'model' or 'model_name'.
Note: self._train_and_save should not be used outside of self._train_single_full
"""
fit_start_time = time.time()
time_limit = model_fit_kwargs.get('time_limit', None)
model_names_trained = []
try:
fit_log_message = f'Fitting model: {model.name} ...'
if time_limit is not None:
if time_limit <= 0:
logger.log(15, f'Skipping {model.name} due to lack of time remaining.')
return model_names_trained
if self._time_limit is not None and self._time_train_start is not None:
time_left_total = self._time_limit - (fit_start_time - self._time_train_start)
else:
time_left_total = time_limit
fit_log_message += f' Training model for up to {round(time_limit, 2)}s of the {round(time_left_total, 2)}s of remaining time.'
logger.log(20, fit_log_message)
model = self._train_single(X, y, model, X_val, y_val, **model_fit_kwargs)
fit_end_time = time.time()
if self.weight_evaluation:
w = model_fit_kwargs.get('sample_weight', None)
w_val = model_fit_kwargs.get('sample_weight_val', None)
else:
w = None
w_val = None
if isinstance(model, BaggedEnsembleModel):
if X_val is not None and y_val is not None:
score = model.score(X=X_val, y=y_val, sample_weight=w_val)
elif model.is_valid_oof() or isinstance(model, WeightedEnsembleModel):
score = model.score_with_oof(y=y, sample_weight=w)
else:
score = None
else:
if X_val is not None and y_val is not None:
score = model.score(X=X_val, y=y_val, sample_weight=w_val)
else:
score = None
pred_end_time = time.time()
if model.fit_time is None:
model.fit_time = fit_end_time - fit_start_time
if model.predict_time is None:
if score is None:
model.predict_time = None
else:
model.predict_time = pred_end_time - fit_end_time
model.val_score = score
# TODO: Add recursive=True to avoid repeatedly loading models each time this is called for bagged ensembles (especially during repeated bagging)
self.save_model(model=model)
except TimeLimitExceeded:
logger.log(20, f'\tTime limit exceeded... Skipping {model.name}.')
# logger.log(20, '\tTime wasted: ' + str(time.time() - fit_start_time))
del model
except NotEnoughMemoryError:
logger.warning(f'\tNot enough memory to train {model.name}... Skipping this model.')
del model
except NoValidFeatures:
logger.warning(f'\tNo valid features to train {model.name}... Skipping this model.')
del model
except NoGPUError:
logger.warning(f'\tNo GPUs available to train {model.name}... Skipping this model.')
del model
except ImportError as err:
logger.error(f'\tWarning: Exception caused {model.name} to fail during training (ImportError)... Skipping this model.')
logger.error(f'\t\t{err}')
if self.verbosity > 2:
logger.exception('Detailed Traceback:')
except Exception as err:
logger.error(f'\tWarning: Exception caused {model.name} to fail during training... Skipping this model.')
logger.error(f'\t\t{err}')
if self.verbosity > 0:
logger.exception('Detailed Traceback:')
del model
else:
self._add_model(model=model, stack_name=stack_name, level=level)
model_names_trained.append(model.name)
if self.low_memory:
del model
return model_names_trained
def _add_model(self, model: AbstractModel, stack_name: str = 'core', level: int = 1) -> bool:
"""
Registers the fit model in the Trainer object. Stores information such as model performance, save path, model type, and more.
To use a model in Trainer, self._add_model must be called.
If self.low_memory, then the model object will be deleted after this call. Use Trainer directly to leverage the model further.
Parameters
----------
model : AbstractModel
Model which has been fit. This model will be registered to the Trainer.
stack_name : str, default 'core'
Stack name to assign the model to. This is used for advanced functionality.
level : int, default 1
Stack level of the stack name to assign the model to. This is used for advanced functionality.
The model is added as a node to self.model_graph, with stack_name and level stored as node attributes.
The model's base_models (if it has any) must all be a lower level than the model.
Returns
-------
boolean, True if model was registered, False if model was found to be invalid and not registered.
"""
if model.val_score is not None:
if model.eval_metric.name != self.eval_metric.name:
logger.log(20, f'\tNote: model has different eval_metric than default.')
logger.log(20, f'\t{round(model.val_score, 4)}\t = Validation score ({model.eval_metric.name})')
if model.fit_time is not None:
logger.log(20, f'\t{round(model.fit_time, 2)}s\t = Training runtime')
if model.predict_time is not None:
logger.log(20, f'\t{round(model.predict_time, 2)}s\t = Validation runtime')
if model.val_score is not None and np.isnan(model.val_score):
logger.warning(f'WARNING: {model.name} has a val_score of {model.val_score} (NaN)! This should never happen. The model will not be saved to avoid instability.')
return False
# TODO: Add to HPO
if isinstance(model, BaggedEnsembleModel):
type_inner = model._child_type
else:
type_inner = type(model)
self.model_graph.add_node(
model.name,
fit_time=model.fit_time,
predict_time=model.predict_time,
val_score=model.val_score,
path=model.path,
type=type(model), # Outer type, can be BaggedEnsemble, StackEnsemble (Type that is able to load the model)
type_inner=type_inner, # Inner type, if Ensemble then it is the type of the inner model (May not be able to load with this type)
can_infer=model.can_infer(),
can_fit=model.can_fit(),
is_valid=model.is_valid(),
stack_name=stack_name,
level=level,
**model._fit_metadata,
)
if isinstance(model, StackerEnsembleModel):
prior_models = self.get_model_names()
# TODO: raise exception if no base models and level != 1?
for stack_column_prefix in model.stack_column_prefix_lst:
base_model_name = model.stack_column_prefix_to_model_map[stack_column_prefix]
if base_model_name not in prior_models:
raise AssertionError(f"Model '{model.name}' depends on model '{base_model_name}', but '{base_model_name}' is not registered as a trained model! Valid models: {prior_models}")
elif level <= self.model_graph.nodes[base_model_name]['level']:
raise AssertionError(f"Model '{model.name}' depends on model '{base_model_name}', but '{base_model_name}' is not in a lower stack level. ('{model.name}' level: {level}, '{base_model_name}' level: {self.model_graph.nodes[base_model_name]['level']})")
self.model_graph.add_edge(base_model_name, model.name)
if self.low_memory:
del model
return True
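# Minimal sketch of how the node attributes registered above are stored and queried (illustrative
# names; mirrors what get_model_attribute/get_models_attribute_dict read back out):
#   import networkx as nx
#   g = nx.DiGraph()
#   g.add_node('LightGBM_L1', val_score=0.93, level=1, stack_name='core', can_infer=True)
#   g.add_node('WeightedEnsemble_L2', val_score=0.95, level=2, stack_name='aux1', can_infer=True)
#   g.add_edge('LightGBM_L1', 'WeightedEnsemble_L2')
#   nx.get_node_attributes(g, 'val_score')   # {'LightGBM_L1': 0.93, 'WeightedEnsemble_L2': 0.95}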
# TODO: Split this to avoid confusion, HPO should go elsewhere?
def _train_single_full(self, X, y, model: AbstractModel, X_unlabeled=None, X_val=None, y_val=None,
hyperparameter_tune_kwargs=None, stack_name='core', k_fold=None, k_fold_start=0, k_fold_end=None,
n_repeats=None, n_repeat_start=0, level=1, time_limit=None, fit_kwargs=None, **kwargs) -> List[str]:
"""
Trains a model, with the potential to train multiple versions of this model with hyperparameter tuning and feature pruning.
Returns a list of successfully trained and saved model names.
Models trained from this method will be accessible in this Trainer.
"""
model_fit_kwargs = self._get_model_fit_kwargs(X=X, X_val=X_val, time_limit=time_limit, k_fold=k_fold, fit_kwargs=fit_kwargs,
ens_sample_weight=kwargs.get('ens_sample_weight', None))
if hyperparameter_tune_kwargs:
if n_repeat_start != 0:
raise ValueError(f'n_repeat_start must be 0 to hyperparameter_tune, value = {n_repeat_start}')
elif k_fold_start != 0:
raise ValueError(f'k_fold_start must be 0 to hyperparameter_tune, value = {k_fold_start}')
if not isinstance(hyperparameter_tune_kwargs, tuple):
num_trials = 1 if time_limit is None else 1000
hyperparameter_tune_kwargs = scheduler_factory(hyperparameter_tune_kwargs, num_trials=num_trials, nthreads_per_trial='auto', ngpus_per_trial='auto')
# hpo_models (dict): keys = model_names, values = model_paths
logger.log(20, f'Hyperparameter tuning model: {model.name} ...')
try:
if isinstance(model, BaggedEnsembleModel):
hpo_models, hpo_model_performances, hpo_results = model.hyperparameter_tune(X=X, y=y, k_fold=k_fold, scheduler_options=hyperparameter_tune_kwargs, **model_fit_kwargs)
else:
hpo_models, hpo_model_performances, hpo_results = model.hyperparameter_tune(X=X, y=y, X_val=X_val, y_val=y_val, scheduler_options=hyperparameter_tune_kwargs, **model_fit_kwargs)
except Exception as err:
logger.exception(f'Warning: Exception caused {model.name} to fail during hyperparameter tuning... Skipping this model.')
logger.warning(err)
del model
model_names_trained = []
else:
# Commented out because it takes too much space (>>5 GB if run for an hour on a small-medium sized dataset)
# self.hpo_results[model.name] = hpo_results
model_names_trained = []
self._extra_banned_names.add(model.name)
for model_hpo_name, model_path in hpo_models.items():
model_hpo = self.load_model(model_hpo_name, path=model_path, model_type=type(model))
logger.log(20, f'Fitted model: {model_hpo.name} ...')
if self._add_model(model=model_hpo, stack_name=stack_name, level=level):
model_names_trained.append(model_hpo.name)
else:
if isinstance(model, BaggedEnsembleModel):
bagged_model_fit_kwargs = self._get_bagged_model_fit_kwargs(k_fold=k_fold, k_fold_start=k_fold_start, k_fold_end=k_fold_end, n_repeats=n_repeats, n_repeat_start=n_repeat_start)
model_fit_kwargs.update(bagged_model_fit_kwargs)
model_names_trained = self._train_and_save(X, y, model, X_val, y_val, X_unlabeled=X_unlabeled, stack_name=stack_name, level=level, **model_fit_kwargs)
self.save()
return model_names_trained
# TODO: How to deal with models that fail during this? They have trained valid models before, but should we still use those models or remove the entire model? Currently we still use models.
# TODO: Time allowance can be made better by only using time taken during final model training and not during HPO and feature pruning.
# TODO: Time allowance not accurate if running from fit_continue
# TODO: Remove level and stack_name arguments, can get them automatically
# TODO: Make sure that pretraining on X_unlabeled only happens 1 time rather than every fold of bagging. (Do during pretrain API work?)
def _train_multi_repeats(self, X, y, models: list, n_repeats, n_repeat_start=1, time_limit=None, time_limit_total_level=None, **kwargs) -> List[str]:
"""
Fits bagged ensemble models with additional folds and/or bagged repeats.
Models must have already been fit prior to entering this method.
This method should only be called in self._train_multi
Returns a list of successfully trained and saved model names.
"""
if time_limit_total_level is None:
time_limit_total_level = time_limit
models_valid = models
models_valid_next = []
repeats_completed = 0
time_start = time.time()
for n in range(n_repeat_start, n_repeats):
if not models_valid:
break # No models to repeat
if time_limit is not None:
time_start_repeat = time.time()
time_left = time_limit - (time_start_repeat - time_start)
if n == n_repeat_start:
time_required = time_limit_total_level * 0.575 # Require slightly over 50% to be safe
else:
time_required = (time_start_repeat - time_start) / repeats_completed * (0.575/0.425)
if time_left < time_required:
logger.log(15, 'Not enough time left to finish repeated k-fold bagging, stopping early ...')
break
logger.log(20, f'Repeating k-fold bagging: {n+1}/{n_repeats}')
for i, model in enumerate(models_valid):
if not self.get_model_attribute(model=model, attribute='can_fit'):
if isinstance(model, str):
models_valid_next.append(model)
else:
models_valid_next.append(model.name)
continue
if isinstance(model, str):
model = self.load_model(model)
if not isinstance(model, BaggedEnsembleModel):
raise AssertionError(f'{model.name} must inherit from BaggedEnsembleModel to perform repeated k-fold bagging. Model type: {type(model).__name__}')
if time_limit is None:
time_left = None
else:
time_start_model = time.time()
time_left = time_limit - (time_start_model - time_start)
models_valid_next += self._train_single_full(X=X, y=y, model=model, k_fold_start=0, k_fold_end=None, n_repeats=n + 1, n_repeat_start=n, time_limit=time_left, **kwargs)
models_valid = copy.deepcopy(models_valid_next)
models_valid_next = []
repeats_completed += 1
logger.log(20, f'Completed {n_repeat_start + repeats_completed}/{n_repeats} k-fold bagging repeats ...')
return models_valid
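# Worked example of the early-stopping heuristic above (illustrative numbers): with
# time_limit_total_level=1000s, the first extra repeat only starts if at least 0.575 * 1000 = 575s
# remain; afterwards, if the completed repeats averaged 300s each, the next repeat requires roughly
# 300 * (0.575 / 0.425) ~= 406s of remaining time before it is attempted.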
def _train_multi_initial(self, X, y, models: List[AbstractModel], k_fold, n_repeats, hyperparameter_tune_kwargs=None, time_limit=None, feature_prune_kwargs=None, **kwargs):
"""
Fits models that have not previously been fit.
This method should only be called in self._train_multi
Returns a list of successfully trained and saved model names.
"""
multi_fold_time_start = time.time()
fit_args = dict(
X=X,
y=y,
k_fold=k_fold,
)
fit_args.update(kwargs)
hpo_enabled = False
if hyperparameter_tune_kwargs:
for key in hyperparameter_tune_kwargs:
if hyperparameter_tune_kwargs[key] is not None:
hpo_enabled = True
break
hpo_time_ratio = 0.9
if hpo_enabled:
time_split = True
else:
time_split = False
k_fold_start = 0
bagged = k_fold > 0
if not bagged:
time_ratio = hpo_time_ratio if hpo_enabled else 1
models = self._train_multi_fold(models=models, hyperparameter_tune_kwargs=hyperparameter_tune_kwargs,
time_limit=time_limit, time_split=time_split, time_ratio=time_ratio, **fit_args)
else:
bagged_time_start = time.time()
if hpo_enabled:
time_ratio = (1 / k_fold) * hpo_time_ratio
models = self._train_multi_fold(models=models, hyperparameter_tune_kwargs=hyperparameter_tune_kwargs,
k_fold_start=0, k_fold_end=1, n_repeats=n_repeats, n_repeat_start=0, time_limit=time_limit,
time_split=time_split, time_ratio=time_ratio, **fit_args)
k_fold_start = 1
bagged_time_limit = time_limit - (time.time() - bagged_time_start) if time_limit is not None else None
models = self._train_multi_fold(models=models, hyperparameter_tune_kwargs=None, k_fold_start=k_fold_start,
k_fold_end=k_fold, n_repeats=n_repeats, n_repeat_start=0, time_limit=bagged_time_limit, **fit_args)
multi_fold_time_elapsed = time.time() - multi_fold_time_start
if time_limit is not None:
time_limit = time_limit - multi_fold_time_elapsed
if feature_prune_kwargs is not None and len(models) > 0:
feature_prune_time_start = time.time()
model_fit_kwargs = self._get_model_fit_kwargs(X=X, X_val=kwargs.get('X_val', None), time_limit=None, k_fold=k_fold,
fit_kwargs=kwargs.get('fit_kwargs', {}), ens_sample_weight=kwargs.get('ens_sample_weight'))
model_fit_kwargs.update(dict(X=X, y=y, X_val=kwargs.get('X_val', None), y_val=kwargs.get('y_val', None)))
if bagged:
bagged_model_fit_kwargs = self._get_bagged_model_fit_kwargs(k_fold=k_fold, k_fold_start=k_fold_start, k_fold_end=k_fold, n_repeats=n_repeats, n_repeat_start=0)
model_fit_kwargs.update(bagged_model_fit_kwargs)
candidate_features = self._proxy_model_feature_prune(time_limit=time_limit, layer_fit_time=multi_fold_time_elapsed, level=kwargs['level'],
features=X.columns.tolist(), model_fit_kwargs=model_fit_kwargs, **feature_prune_kwargs)
if time_limit is not None:
time_limit = time_limit - (time.time() - feature_prune_time_start)
fit_args['X'] = X[candidate_features]
fit_args['X_val'] = kwargs['X_val'][candidate_features] if isinstance(kwargs.get('X_val', None), pd.DataFrame) else kwargs.get('X_val', None)
if len(candidate_features) < len(X.columns):
unfit_models = []
original_prune_map = {}
for model in models:
unfit_model = self.load_model(model).convert_to_template()
unfit_model.rename(f"{unfit_model.name}_Prune")
unfit_models.append(unfit_model)
original_prune_map[unfit_model.name] = model
pruned_models = self._train_multi_fold(models=unfit_models, hyperparameter_tune_kwargs=None, k_fold_start=k_fold_start,
k_fold_end=k_fold, n_repeats=n_repeats, n_repeat_start=0, time_limit=time_limit, **fit_args)
force_prune = feature_prune_kwargs.get('force_prune', False)
models = self._retain_better_pruned_models(pruned_models=pruned_models, original_prune_map=original_prune_map, force_prune=force_prune)
return models
# TODO: Ban KNN from being a Stacker model outside of aux. Will need to ensemble select on all stack layers ensemble selector to make it work
# TODO: Robert dataset, LightGBM is super good but RF and KNN take all the time away from it on 1h despite being much worse
# TODO: Add time_limit_per_model
# TODO: Rename for v0.1
def _train_multi_fold(self, X, y, models: List[AbstractModel], time_limit=None, time_split=False, time_ratio=1, hyperparameter_tune_kwargs=None, **kwargs) -> List[str]:
"""
Trains and saves a list of models sequentially.
This method should only be called in self._train_multi_initial
Returns a list of trained model names.
"""
models_valid = []
time_start = time.time()
if time_limit is not None:
time_limit = time_limit * time_ratio
if time_limit is not None and len(models) > 0:
time_limit_model_split = time_limit / len(models)
else:
time_limit_model_split = time_limit
for i, model in enumerate(models):
if isinstance(model, str):
model = self.load_model(model)
elif self.low_memory:
model = copy.deepcopy(model)
if hyperparameter_tune_kwargs is not None and isinstance(hyperparameter_tune_kwargs, dict):
hyperparameter_tune_kwargs_model = hyperparameter_tune_kwargs.get(model.name, None)
else:
hyperparameter_tune_kwargs_model = None
# TODO: Only update scores when finished, only update model as part of final models if finished!
if time_split:
time_left = time_limit_model_split
else:
if time_limit is None:
time_left = None
else:
time_start_model = time.time()
time_left = time_limit - (time_start_model - time_start)
model_name_trained_lst = self._train_single_full(X, y, model, time_limit=time_left, hyperparameter_tune_kwargs=hyperparameter_tune_kwargs_model, **kwargs)
if self.low_memory:
del model
models_valid += model_name_trained_lst
return models_valid
def _train_multi(self, X, y, models: List[AbstractModel], hyperparameter_tune_kwargs=None, feature_prune_kwargs=None, k_fold=None, n_repeats=None, n_repeat_start=0, time_limit=None, **kwargs) -> List[str]:
"""
Train a list of models using the same data.
Assumes that input data has already been processed in the form the models will receive as input (including stack feature generation).
Trained models are available in the trainer object.
Note: Consider using public APIs instead of this.
Returns a list of trained model names.
"""
time_limit_total_level = time_limit
if k_fold is None:
k_fold = self.k_fold
if n_repeats is None:
n_repeats = self.n_repeats
if (k_fold == 0) and (n_repeats != 1):
raise ValueError(f'n_repeats must be 1 when k_fold is 0, values: ({n_repeats}, {k_fold})')
if time_limit is None and feature_prune_kwargs is None:
n_repeats_initial = n_repeats
else:
n_repeats_initial = 1
if n_repeat_start == 0:
time_start = time.time()
model_names_trained = self._train_multi_initial(X=X, y=y, models=models, k_fold=k_fold, n_repeats=n_repeats_initial, hyperparameter_tune_kwargs=hyperparameter_tune_kwargs,
feature_prune_kwargs=feature_prune_kwargs, time_limit=time_limit, **kwargs)
n_repeat_start = n_repeats_initial
if time_limit is not None:
time_limit = time_limit - (time.time() - time_start)
else:
model_names_trained = models
if (n_repeats > 1) and (n_repeat_start < n_repeats):
model_names_trained = self._train_multi_repeats(X=X, y=y, models=model_names_trained,
k_fold=k_fold, n_repeats=n_repeats, n_repeat_start=n_repeat_start, time_limit=time_limit, time_limit_total_level=time_limit_total_level, **kwargs)
return model_names_trained
def _train_multi_and_ensemble(self, X, y, X_val, y_val, hyperparameters: dict = None, X_unlabeled=None, num_stack_levels=0, time_limit=None, groups=None, **kwargs) -> List[str]:
"""Identical to self.train_multi_levels, but also saves the data to disk. This should only ever be called once."""
if self.save_data and not self.is_data_saved:
self.save_X(X)
self.save_y(y)
if X_val is not None:
self.save_X_val(X_val)
if y_val is not None:
self.save_y_val(y_val)
self.is_data_saved = True
if self._groups is None:
self._groups = groups
self._num_rows_train = len(X)
if X_val is not None:
self._num_rows_train += len(X_val)
self._num_cols_train = len(list(X.columns))
model_names_fit = self.train_multi_levels(X, y, hyperparameters=hyperparameters, X_val=X_val, y_val=y_val,
X_unlabeled=X_unlabeled, level_start=1, level_end=num_stack_levels+1, time_limit=time_limit, **kwargs)
if len(self.get_model_names()) == 0:
raise ValueError('AutoGluon did not successfully train any models')
return model_names_fit
def _predict_model(self, X, model, model_pred_proba_dict=None):
if isinstance(model, str):
model = self.load_model(model)
X = self.get_inputs_to_model(model=model, X=X, model_pred_proba_dict=model_pred_proba_dict, fit=False)
y_pred = model.predict(X=X)
if self._regress_preds_asprobas and model.problem_type == REGRESSION: # Convert regression preds to classes (during distillation)
if (len(y_pred.shape) > 1) and (y_pred.shape[1] > 1):
problem_type = MULTICLASS
else:
problem_type = BINARY
y_pred = get_pred_from_proba(y_pred_proba=y_pred, problem_type=problem_type)
return y_pred
def _predict_proba_model(self, X, model, model_pred_proba_dict=None):
if isinstance(model, str):
model = self.load_model(model)
X = self.get_inputs_to_model(model=model, X=X, model_pred_proba_dict=model_pred_proba_dict, fit=False)
return model.predict_proba(X=X)
def _get_dummy_stacker(self, level: int, model_levels: dict, use_orig_features=True) -> StackerEnsembleModel:
model_names = model_levels[level - 1]
base_models_dict = {}
for model_name in model_names:
if model_name in self.models.keys():
base_models_dict[model_name] = self.models[model_name]
hyperparameters = dict(
use_orig_features=use_orig_features,
max_base_models_per_type=0,
max_base_models=0,
)
dummy_stacker = StackerEnsembleModel(
path='',
name='',
model_base=AbstractModel(
path='',
name='',
problem_type=self.problem_type,
eval_metric=self.eval_metric,
hyperparameters={'ag_args_fit': {'quantile_levels': self.quantile_levels}}
),
base_model_names=model_names,
base_models_dict=base_models_dict,
base_model_paths_dict=self.get_models_attribute_dict(attribute='path', models=model_names),
base_model_types_dict=self.get_models_attribute_dict(attribute='type', models=model_names),
hyperparameters=hyperparameters,
random_state=level+self.random_state
)
dummy_stacker.initialize(num_classes=self.num_classes)
return dummy_stacker
def _proxy_model_feature_prune(self, model_fit_kwargs: dict, time_limit: float, layer_fit_time: float, level: int, features: List[str], **feature_prune_kwargs: dict) -> List[str]:
"""
Uses the best LightGBM-based base learner of this layer to perform time-aware permutation feature importance based feature pruning.
If all LightGBM models fail, use the model that achieved the highest validation accuracy. Feature pruning gets the smaller of the
remaining layer time limit and k times (default=2) it took to fit the base learners of this layer as its resource. Note that feature pruning can
exit earlier based on arguments in feature_prune_kwargs. The method returns the list of feature names that survived the pruning procedure.
Parameters
----------
feature_prune_kwargs : dict
Feature pruning kwarg arguments. Should contain arguments passed to FeatureSelector.select_features. One can optionally attach the following
additional kwargs that are consumed at this level: 'proxy_model_class' to use a model of particular type with the highest validation score as the
proxy model, 'feature_prune_time_limit' to manually specify how long we should perform the feature pruning procedure for, 'k' to specify how long
we should perform feature pruning for if 'feature_prune_time_limit' has not been set (feature selection time budget is set to k * layer_fit_time),
and 'raise_exception' to signify that AutoGluon should throw an exception if feature pruning errors out.
time_limit : float
Time limit left within the current stack layer in seconds. Feature pruning should never take more than this time.
layer_fit_time : float
How long it took to fit all the models in this layer once. Used to calculate how long to feature prune for.
level : int
Level of this stack layer.
features: List[str]
The list of feature names in the inputted dataset.
Returns
-------
candidate_features : List[str]
Feature names that survived the pruning procedure.
"""
k = feature_prune_kwargs.pop('k', 2)
proxy_model_class = feature_prune_kwargs.pop('proxy_model_class', self._get_default_proxy_model_class())
feature_prune_time_limit = feature_prune_kwargs.pop('feature_prune_time_limit', None)
raise_exception_on_fail = feature_prune_kwargs.pop('raise_exception', False)
proxy_model = self._get_feature_prune_proxy_model(proxy_model_class=proxy_model_class, level=level)
if proxy_model is None:
return features
if feature_prune_time_limit is not None:
feature_prune_time_limit = min(max(time_limit - layer_fit_time, 0), feature_prune_time_limit)
elif time_limit is not None:
feature_prune_time_limit = min(max(time_limit - layer_fit_time, 0), max(k * layer_fit_time, 0.05 * time_limit))
else:
feature_prune_time_limit = max(k * layer_fit_time, 300)
if feature_prune_time_limit < 2 * proxy_model.fit_time:
logger.warning(f"Insufficient time to train even a single feature pruning model (remaining: {feature_prune_time_limit}, "
f"needed: {proxy_model.fit_time}). Skipping feature pruning.")
return features
selector = FeatureSelector(model=proxy_model, time_limit=feature_prune_time_limit,
raise_exception=raise_exception_on_fail, problem_type=self.problem_type)
candidate_features = selector.select_features(**feature_prune_kwargs, **model_fit_kwargs)
return candidate_features
def _get_default_proxy_model_class(self):
return None
def _retain_better_pruned_models(self, pruned_models: List[str], original_prune_map: dict, force_prune: bool = False) -> List[str]:
"""
Compares each model fit on the pruned set of features with its counterpart fit on the full set of features.
Takes the model that achieved the higher validation score and deletes the other from self.model_graph.
Parameters
----------
pruned_models : List[str]
A list of pruned model names.
original_prune_map : dict
A dictionary mapping the names of models fitted on pruned features to the names of models fitted on original features.
force_prune : bool, default = False
If set to true, force all base learners to work with the pruned set of features.
Returns
----------
models : List[str]
A list of model names.
"""
models = []
for pruned_model in pruned_models:
original_model = original_prune_map[pruned_model]
leaderboard = self.leaderboard()
original_score = leaderboard[leaderboard['model'] == original_model]['score_val'].item()
pruned_score = leaderboard[leaderboard['model'] == pruned_model]['score_val'].item()
score_str = f"({round(pruned_score, 4)} vs {round(original_score, 4)})"
if force_prune:
logger.log(30, f"Pruned score vs original score is {score_str}. Replacing original model since force_prune=True...")
self.delete_models(models_to_delete=original_model, dry_run=False)
models.append(pruned_model)
elif pruned_score > original_score:
logger.log(30, f"Model trained with feature pruning score is better than original model's score {score_str}. Replacing original model...")
self.delete_models(models_to_delete=original_model, dry_run=False)
models.append(pruned_model)
else:
logger.log(30, f"Model trained with feature pruning score is not better than original model's score {score_str}. Keeping original model...")
self.delete_models(models_to_delete=pruned_model, dry_run=False)
models.append(original_model)
return models
# TODO: Enable raw=True for bagged models when X=None
# This is non-trivial to implement for multi-layer stacking ensembles on the OOF data.
# TODO: Consider limiting X to 10k rows here instead of inside the model call
def get_feature_importance(self, model=None, X=None, y=None, raw=True, **kwargs) -> pd.DataFrame:
if model is None:
model = self.model_best
model: AbstractModel = self.load_model(model)
if X is None and model.val_score is None:
raise AssertionError(f'Model {model.name} is not valid for generating feature importances on original training data because no validation data was used during training, please specify new test data to compute feature importances.')
if X is None:
if isinstance(model, WeightedEnsembleModel):
if self.bagged_mode:
if raw:
raise AssertionError('`feature_stage=\'transformed\'` feature importance on the original training data is not yet supported when bagging is enabled, please specify new test data to compute feature importances.')
X = None
is_oof = True
else:
if raw:
X = self.load_X_val()
else:
X = None
is_oof = False
elif isinstance(model, BaggedEnsembleModel):
if raw:
raise AssertionError('`feature_stage=\'transformed\'` feature importance on the original training data is not yet supported when bagging is enabled, please specify new test data to compute feature importances.')
X = self.load_X()
X = self.get_inputs_to_model(model=model, X=X, fit=True)
is_oof = True
else:
X = self.load_X_val()
if not raw:
X = self.get_inputs_to_model(model=model, X=X, fit=False)
is_oof = False
else:
is_oof = False
if not raw:
X = self.get_inputs_to_model(model=model, X=X, fit=False)
if y is None and X is not None:
if is_oof:
y = self.load_y()
else:
y = self.load_y_val()
if raw:
return self._get_feature_importance_raw(X=X, y=y, model=model, **kwargs)
else:
if is_oof:
kwargs['is_oof'] = is_oof
return model.compute_feature_importance(X=X, y=y, **kwargs)
# TODO: Can get feature importances of all children of model at no extra cost, requires scoring the values after predict_proba on each model
# Could solve by adding a self.score_all() function which takes model as input and also returns scores of all children models.
# This would be best solved after adding graph representation, it lives most naturally in AbstractModel
# TODO: Can skip features which were pruned on all models that model depends on (Complex to implement, requires graph representation)
# TODO: Note that raw importance will not equal non-raw importance for bagged models, even if raw features are identical to the model features.
# This is because for non-raw, we do an optimization where each fold model calls .compute_feature_importance(), and then the feature importances are averaged across the folds.
# This is different from raw, where the predictions of the folds are averaged and then feature importance is computed.
# Consider aligning these methods so they produce the same result.
# The output of this function is identical to non-raw when model is level 1 and non-bagged
def _get_feature_importance_raw(self, X, y, model, eval_metric=None, **kwargs) -> pd.DataFrame:
if eval_metric is None:
eval_metric = self.eval_metric
if model is None:
model = self.model_best
if eval_metric.needs_pred:
predict_func = self.predict
else:
predict_func = self.predict_proba
model: AbstractModel = self.load_model(model)
predict_func_kwargs = dict(model=model)
return compute_permutation_feature_importance(
X=X, y=y, predict_func=predict_func, predict_func_kwargs=predict_func_kwargs, eval_metric=eval_metric, **kwargs
)
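# Standalone sketch of the permutation-importance idea behind the call above (toy data,
# not AutoGluon's compute_permutation_feature_importance): a feature's importance is
# the score drop after shuffling that feature's column.
import numpy as np
import pandas as pd
_rng = np.random.default_rng(0)
_X = pd.DataFrame({'signal': _rng.normal(size=500), 'noise': _rng.normal(size=500)})
_y = 2.0 * _X['signal']
def _toy_score(frame): return -((2.0 * frame['signal'] - _y) ** 2).mean()
_shuffled = _X.assign(signal=_rng.permutation(_X['signal'].to_numpy()))
print(_toy_score(_X) - _toy_score(_shuffled))  # large positive value -> 'signal' matters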
def _get_models_load_info(self, model_names):
model_names = copy.deepcopy(model_names)
model_paths = self.get_models_attribute_dict(attribute='path', models=model_names)
model_types = self.get_models_attribute_dict(attribute='type', models=model_names)
return model_names, model_paths, model_types
# Sums the attribute value across all models that the provided model depends on, including itself.
# For instance, this function can return the expected total predict_time of a model.
# attribute is the name of the desired attribute to be summed, or a dictionary of model name -> attribute value if the attribute is not present in the graph.
def get_model_attribute_full(self, model, attribute, func=sum):
base_model_set = self.get_minimum_model_set(model)
if isinstance(attribute, dict):
is_dict = True
else:
is_dict = False
if len(base_model_set) == 1:
if is_dict:
return attribute[model]
else:
return self.model_graph.nodes[base_model_set[0]][attribute]
# attribute_full = 0
attribute_lst = []
for base_model in base_model_set:
if is_dict:
attribute_base_model = attribute[base_model]
else:
attribute_base_model = self.model_graph.nodes[base_model][attribute]
if attribute_base_model is None:
return None
attribute_lst.append(attribute_base_model)
# attribute_full += attribute_base_model
if attribute_lst:
attribute_full = func(attribute_lst)
else:
attribute_full = 0
return attribute_full
# Returns dictionary of model name -> attribute value for the provided attribute
def get_models_attribute_dict(self, attribute, models: list = None) -> dict:
models_attribute_dict = nx.get_node_attributes(self.model_graph, attribute)
if models is not None:
model_names = []
for model in models:
if not isinstance(model, str):
model = model.name
model_names.append(model)
models_attribute_dict = {key: val for key, val in models_attribute_dict.items() if key in model_names}
return models_attribute_dict
# TODO: v0.1 Proper error catching
# Returns attribute value for the given model
def get_model_attribute(self, model, attribute: str):
if not isinstance(model, str):
model = model.name
return self.model_graph.nodes[model][attribute]
def set_model_attribute(self, model, attribute: str, val):
if not isinstance(model, str):
model = model.name
self.model_graph.nodes[model][attribute] = val
# Gets the minimum set of models that the provided model depends on, including itself
# Returns a list of model names
def get_minimum_model_set(self, model, include_self=True) -> list:
if not isinstance(model, str):
model = model.name
minimum_model_set = list(nx.bfs_tree(self.model_graph, model, reverse=True))
if not include_self:
minimum_model_set = [m for m in minimum_model_set if m != model]
return minimum_model_set
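# Standalone sketch of the graph walk above (model names invented): with edges pointing
# from base models to the ensemble, nx.bfs_tree(..., reverse=True) starting at the
# ensemble returns the ensemble plus every model it depends on.
import networkx as nx
_g = nx.DiGraph()
_g.add_edges_from([('LightGBM', 'WeightedEnsemble'), ('CatBoost', 'WeightedEnsemble')])
print(list(nx.bfs_tree(_g, 'WeightedEnsemble', reverse=True)))
# -> ['WeightedEnsemble', 'LightGBM', 'CatBoost'] (base-model order may vary)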
# Gets the minimum set of models that the provided models depend on, including themselves
# Returns a list of model names
def get_minimum_models_set(self, models: list) -> list:
models_set = set()
for model in models:
models_set = models_set.union(self.get_minimum_model_set(model))
return list(models_set)
# Gets the set of base models used directly by the provided model
# Returns a list of model names
def get_base_model_names(self, model) -> list:
if not isinstance(model, str):
model = model.name
base_model_set = list(self.model_graph.predecessors(model))
return base_model_set
def _get_banned_model_names(self) -> list:
"""Gets all model names which would cause model files to be overwritten if a new model was trained with the name"""
return self.get_model_names() + list(self._extra_banned_names)
def leaderboard(self, extra_info=False):
model_names = self.get_model_names()
score_val = []
fit_time_marginal = []
pred_time_val_marginal = []
stack_level = []
fit_time = []
pred_time_val = []
can_infer = []
fit_order = list(range(1, len(model_names)+1))
score_val_dict = self.get_models_attribute_dict('val_score')
fit_time_marginal_dict = self.get_models_attribute_dict('fit_time')
predict_time_marginal_dict = self.get_models_attribute_dict('predict_time')
for model_name in model_names:
score_val.append(score_val_dict[model_name])
fit_time_marginal.append(fit_time_marginal_dict[model_name])
fit_time.append(self.get_model_attribute_full(model=model_name, attribute='fit_time'))
pred_time_val_marginal.append(predict_time_marginal_dict[model_name])
pred_time_val.append(self.get_model_attribute_full(model=model_name, attribute='predict_time'))
stack_level.append(self.get_model_level(model_name))
can_infer.append(self.model_graph.nodes[model_name]['can_infer'])
model_info_dict = defaultdict(list)
if extra_info:
# TODO: feature_metadata
# TODO: disk size
# TODO: load time
# TODO: Add persist_if_mem_safe() function to persist in memory all models if reasonable memory size (or a specific model+ancestors)
# TODO: Add is_persisted() function to check which models are persisted in memory
# TODO: package_dependencies, package_dependencies_full
info = self.get_info(include_model_info=True)
model_info = info['model_info']
custom_model_info = {}
for model_name in model_info:
custom_info = {}
bagged_info = model_info[model_name].get('bagged_info', {})
custom_info['num_models'] = bagged_info.get('num_child_models', 1)
custom_info['memory_size'] = bagged_info.get('max_memory_size', model_info[model_name]['memory_size'])
custom_info['memory_size_min'] = bagged_info.get('min_memory_size', model_info[model_name]['memory_size'])
custom_info['child_model_type'] = bagged_info.get('child_model_type', None)
custom_info['child_hyperparameters'] = bagged_info.get('child_hyperparameters', None)
custom_info['child_hyperparameters_fit'] = bagged_info.get('child_hyperparameters_fit', None)
custom_info['child_ag_args_fit'] = bagged_info.get('child_ag_args_fit', None)
custom_model_info[model_name] = custom_info
model_info_keys = ['num_features', 'model_type', 'hyperparameters', 'hyperparameters_fit', 'ag_args_fit', 'features']
model_info_sum_keys = []
for key in model_info_keys:
model_info_dict[key] = [model_info[model_name][key] for model_name in model_names]
if key in model_info_sum_keys:
key_dict = {model_name: model_info[model_name][key] for model_name in model_names}
model_info_dict[key + '_full'] = [self.get_model_attribute_full(model=model_name, attribute=key_dict) for model_name in model_names]
model_info_keys = ['num_models', 'memory_size', 'memory_size_min', 'child_model_type', 'child_hyperparameters', 'child_hyperparameters_fit', 'child_ag_args_fit']
model_info_full_keys = {'memory_size': [('memory_size_w_ancestors', sum)], 'memory_size_min': [('memory_size_min_w_ancestors', max)], 'num_models': [('num_models_w_ancestors', sum)]}
for key in model_info_keys:
model_info_dict[key] = [custom_model_info[model_name][key] for model_name in model_names]
if key in model_info_full_keys:
key_dict = {model_name: custom_model_info[model_name][key] for model_name in model_names}
for column_name, func in model_info_full_keys[key]:
model_info_dict[column_name] = [self.get_model_attribute_full(model=model_name, attribute=key_dict, func=func) for model_name in model_names]
ancestors = [list(nx.dag.ancestors(self.model_graph, model_name)) for model_name in model_names]
descendants = [list(nx.dag.descendants(self.model_graph, model_name)) for model_name in model_names]
model_info_dict['num_ancestors'] = [len(ancestor_lst) for ancestor_lst in ancestors]
model_info_dict['num_descendants'] = [len(descendant_lst) for descendant_lst in descendants]
model_info_dict['ancestors'] = ancestors
model_info_dict['descendants'] = descendants
df = pd.DataFrame(data={
'model': model_names,
'score_val': score_val,
'pred_time_val': pred_time_val,
'fit_time': fit_time,
'pred_time_val_marginal': pred_time_val_marginal,
'fit_time_marginal': fit_time_marginal,
'stack_level': stack_level,
'can_infer': can_infer,
'fit_order': fit_order,
**model_info_dict,
})
df_sorted = df.sort_values(by=['score_val', 'pred_time_val', 'model'], ascending=[False, True, False]).reset_index(drop=True)
df_columns_lst = df_sorted.columns.tolist()
explicit_order = [
'model',
'score_val',
'pred_time_val',
'fit_time',
'pred_time_val_marginal',
'fit_time_marginal',
'stack_level',
'can_infer',
'fit_order',
'num_features',
'num_models',
'num_models_w_ancestors',
'memory_size',
'memory_size_w_ancestors',
'memory_size_min',
'memory_size_min_w_ancestors',
'num_ancestors',
'num_descendants',
'model_type',
'child_model_type'
]
explicit_order = [column for column in explicit_order if column in df_columns_lst]
df_columns_other = [column for column in df_columns_lst if column not in explicit_order]
df_columns_new = explicit_order + df_columns_other
df_sorted = df_sorted[df_columns_new]
return df_sorted
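# Standalone sketch of the leaderboard sort order above (scores and times invented):
# best validation score first, ties broken by faster inference, then by model name.
import pandas as pd
_lb = pd.DataFrame({'model': ['CatBoost', 'LightGBM', 'WeightedEnsemble_L2'],
'score_val': [0.92, 0.92, 0.94],
'pred_time_val': [0.60, 0.25, 0.90]})
print(_lb.sort_values(by=['score_val', 'pred_time_val', 'model'],
ascending=[False, True, False]).reset_index(drop=True))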
def get_info(self, include_model_info=False) -> dict:
num_models_trained = len(self.get_model_names())
if self.model_best is not None:
best_model = self.model_best
else:
try:
best_model = self.get_model_best()
except AssertionError:
best_model = None
if best_model is not None:
best_model_score_val = self.get_model_attribute(model=best_model, attribute='val_score')
best_model_stack_level = self.get_model_level(best_model)
else:
best_model_score_val = None
best_model_stack_level = None
# fit_time = None
num_bag_folds = self.k_fold
max_core_stack_level = self.get_max_level('core')
max_stack_level = self.get_max_level()
problem_type = self.problem_type
eval_metric = self.eval_metric.name
time_train_start = self._time_train_start
num_rows_train = self._num_rows_train
num_cols_train = self._num_cols_train
num_classes = self.num_classes
# TODO:
# Disk size of models
# Raw feature count
# HPO time
# Bag time
# Feature prune time
# Exception count / models failed count
# True model count (models * kfold)
# AutoGluon version fit on
# Max memory usage
# CPU count used / GPU count used
info = {
'time_train_start': time_train_start,
'num_rows_train': num_rows_train,
'num_cols_train': num_cols_train,
'num_classes': num_classes,
'problem_type': problem_type,
'eval_metric': eval_metric,
'best_model': best_model,
'best_model_score_val': best_model_score_val,
'best_model_stack_level': best_model_stack_level,
'num_models_trained': num_models_trained,
'num_bag_folds': num_bag_folds,
'max_stack_level': max_stack_level,
'max_core_stack_level': max_core_stack_level,
}
if include_model_info:
info['model_info'] = self.get_models_info()
return info
def get_models_info(self, models: List[str] = None) -> dict:
if models is None:
models = self.get_model_names()
model_info_dict = dict()
for model in models:
if isinstance(model, str):
if model in self.models.keys():
model = self.models[model]
if isinstance(model, str):
model_type = self.get_model_attribute(model=model, attribute='type')
model_path = self.get_model_attribute(model=model, attribute='path')
model_info_dict[model] = model_type.load_info(path=model_path)
else:
model_info_dict[model.name] = model.get_info()
return model_info_dict
def reduce_memory_size(self, remove_data=True, remove_fit_stack=False, remove_fit=True, remove_info=False, requires_save=True, reduce_children=False, **kwargs):
if remove_data and self.is_data_saved:
data_files = [
self.path_data + 'X.pkl',
self.path_data + 'X_val.pkl',
self.path_data + 'y.pkl',
self.path_data + 'y_val.pkl',
]
for data_file in data_files:
try:
os.remove(data_file)
except FileNotFoundError:
pass
if requires_save:
self.is_data_saved = False
try:
os.rmdir(self.path_data)
except OSError:
pass
try:
os.rmdir(self.path_utils)
except OSError:
pass
models = self.get_model_names()
for model in models:
model = self.load_model(model)
model.reduce_memory_size(remove_fit_stack=remove_fit_stack, remove_fit=remove_fit, remove_info=remove_info, requires_save=requires_save, reduce_children=reduce_children, **kwargs)
if requires_save:
self.save_model(model, reduce_memory=False)
if requires_save:
self.save()
# TODO: Also enable deletion of models which didn't succeed in training (files may still be persisted)
# This includes the original HPO fold for stacking
# Deletes specified models from trainer and from disk (if delete_from_disk=True).
def delete_models(self, models_to_keep=None, models_to_delete=None, allow_delete_cascade=False, delete_from_disk=True, dry_run=True):
if models_to_keep is not None and models_to_delete is not None:
raise ValueError('Exactly one of [models_to_keep, models_to_delete] must be set.')
if models_to_keep is not None:
if not isinstance(models_to_keep, list):
models_to_keep = [models_to_keep]
minimum_model_set = set()
for model in models_to_keep:
minimum_model_set.update(self.get_minimum_model_set(model))
minimum_model_set = list(minimum_model_set)
models_to_remove = [model for model in self.get_model_names() if model not in minimum_model_set]
elif models_to_delete is not None:
if not isinstance(models_to_delete, list):
models_to_delete = [models_to_delete]
minimum_model_set = set(models_to_delete)
minimum_model_set_orig = copy.deepcopy(minimum_model_set)
for model in models_to_delete:
minimum_model_set.update(nx.algorithms.dag.descendants(self.model_graph, model))
if not allow_delete_cascade:
if minimum_model_set != minimum_model_set_orig:
raise AssertionError('models_to_delete contains models which cause a delete cascade due to other models being dependent on them. Set allow_delete_cascade=True to enable the deletion.')
minimum_model_set = list(minimum_model_set)
models_to_remove = [model for model in self.get_model_names() if model in minimum_model_set]
else:
raise ValueError('Exactly one of [models_to_keep, models_to_delete] must be set.')
if dry_run:
logger.log(30, f'Dry run enabled, AutoGluon would have deleted the following models: {models_to_remove}')
if delete_from_disk:
for model in models_to_remove:
model = self.load_model(model)
logger.log(30, f'\tDirectory {model.path} would have been deleted.')
logger.log(30, f'To perform the deletion, set dry_run=False')
return
if delete_from_disk:
for model in models_to_remove:
model = self.load_model(model)
model.delete_from_disk()
self.model_graph.remove_nodes_from(models_to_remove)
for model in models_to_remove:
if model in self.models:
self.models.pop(model)
models_kept = self.get_model_names()
if self.model_best is not None and self.model_best not in models_kept:
try:
self.model_best = self.get_model_best()
except AssertionError:
self.model_best = None
# TODO: Delete from all the other model dicts
self.save()
@classmethod
def load(cls, path, reset_paths=False):
load_path = path + cls.trainer_file_name
if not reset_paths:
return load_pkl.load(path=load_path)
else:
obj = load_pkl.load(path=load_path)
obj.set_contexts(path)
obj.reset_paths = reset_paths
return obj
@classmethod
def load_info(cls, path, reset_paths=False, load_model_if_required=True):
load_path = path + cls.trainer_info_name
try:
return load_pkl.load(path=load_path)
except:
if load_model_if_required:
trainer = cls.load(path=path, reset_paths=reset_paths)
return trainer.get_info()
else:
raise
def save_info(self, include_model_info=False):
info = self.get_info(include_model_info=include_model_info)
save_pkl.save(path=self.path + self.trainer_info_name, object=info)
save_json.save(path=self.path + self.trainer_info_json_name, obj=info)
return info
def _process_hyperparameters(self, hyperparameters: dict) -> dict:
return process_hyperparameters(hyperparameters=hyperparameters)
def _get_full_model_val_score(self, model: str) -> float:
model_full_dict_inverse = {full: orig for orig, full in self.model_full_dict.items()}
model_performances = self.get_models_attribute_dict(attribute='val_score')
normal_model = model_full_dict_inverse[model]
if normal_model not in model_performances:
# normal model is deleted
if model not in self._model_full_dict_val_score:
raise ValueError(f'_FULL model {model} had the model it was based on ({normal_model}) deleted, and the validation score was not stored.')
val_score = self._model_full_dict_val_score[model]
else:
# normal model exists
val_score = model_performances[normal_model]
return val_score
def distill(self, X=None, y=None, X_val=None, y_val=None, X_unlabeled=None,
time_limit=None, hyperparameters=None, holdout_frac=None, verbosity=None,
models_name_suffix=None, teacher=None, teacher_preds='soft',
augmentation_data=None, augment_method='spunge', augment_args={'size_factor':5,'max_size':int(1e5)},
augmented_sample_weight=1.0):
""" Various distillation algorithms.
Args:
X, y: pd.DataFrame and pd.Series of training data.
If None, original training data used during predictor.fit() will be loaded.
This data is split into train/validation if X_val, y_val are None.
X_val, y_val: pd.DataFrame and pd.Series of validation data.
time_limit, hyperparameters, holdout_frac: defined as in predictor.fit()
teacher (None or str):
If None, uses the model with the highest validation score as the teacher model, otherwise use the specified model name as the teacher.
teacher_preds (None or str): If None, we only train with original labels (no data augmentation, overrides augment_method)
If 'hard', labels are hard teacher predictions given by: teacher.predict()
If 'soft', labels are soft teacher predictions given by: teacher.predict_proba()
Note: 'hard' and 'soft' are equivalent for regression problems.
If augment_method specified, teacher predictions are only used to label augmented data (training data keeps original labels).
To apply label-smoothing: teacher_preds='onehot' will use original training data labels converted to one-hots for multiclass (no data augmentation). # TODO: expose smoothing-hyperparameter.
models_name_suffix (str): Suffix to append to each student model's name, new names will look like: 'MODELNAME_dstl_SUFFIX'
augmentation_data: pd.DataFrame of additional data to use as "augmented data" (does not contain labels).
When specified, augment_method, augment_args are ignored, and this is the only augmented data that is used (teacher_preds cannot be None).
augment_method (None or str): specifies which augmentation strategy to utilize. Options: [None, 'spunge','munge']
If None, no augmentation gets applied.
augment_args (dict): args passed into the augmentation function corresponding to augment_method.
augmented_sample_weight (float): Nonnegative value indicating how much to weight augmented samples. This is only considered if sample_weight was initially specified in Predictor.
"""
if verbosity is None:
verbosity = self.verbosity
if teacher is None:
teacher = self._get_best()
hyperparameter_tune = False # TODO: add as argument with scheduler options.
if augmentation_data is not None and teacher_preds is None:
raise ValueError("augmentation_data must be None if teacher_preds is None")
logger.log(20, f"Distilling with teacher='{teacher}', teacher_preds={str(teacher_preds)}, augment_method={str(augment_method)} ...")
if teacher not in self.get_model_names(can_infer=True):
raise AssertionError(f"Teacher model '{teacher}' is not a valid teacher model! Either it does not exist or it cannot infer on new data.\n"
f"Valid teacher models: {self.get_model_names(can_infer=True)}")
if X is None:
if y is not None:
raise ValueError("X cannot be None when y specified.")
X = self.load_X()
X_val = self.load_X_val()
if y is None:
y = self.load_y()
y_val = self.load_y_val()
if X_val is None:
if y_val is not None:
raise ValueError("X_val cannot be None when y_val specified.")
if holdout_frac is None:
holdout_frac = default_holdout_frac(len(X), hyperparameter_tune)
X, X_val, y, y_val = generate_train_test_split(X, y, problem_type=self.problem_type, test_size=holdout_frac)
y_val_og = y_val.copy()
og_bagged_mode = self.bagged_mode
og_verbosity = self.verbosity
self.bagged_mode = False # turn off bagging
self.verbosity = verbosity # change verbosity for distillation
if self.sample_weight is not None:
X, w = extract_column(X, self.sample_weight)
if teacher_preds is None or teacher_preds == 'onehot':
augment_method = None
logger.log(20, "Training students without a teacher model. Set teacher_preds = 'soft' or 'hard' to distill using the best AutoGluon predictor as teacher.")
if teacher_preds in ['onehot','soft']:
y = format_distillation_labels(y, self.problem_type, self.num_classes)
y_val = format_distillation_labels(y_val, self.problem_type, self.num_classes)
if augment_method is None and augmentation_data is None:
if teacher_preds == 'hard':
y_pred = pd.Series(self.predict(X, model=teacher))
if (self.problem_type != REGRESSION) and (len(y_pred.unique()) < len(y.unique())): # add missing labels
logger.log(15, "Adding missing labels to distillation dataset by including some real training examples")
indices_to_add = []
for clss in y.unique():
if clss not in y_pred.unique():
logger.log(15, f"Fetching a row with label={clss} from training data")
clss_index = y[y == clss].index[0]
indices_to_add.append(clss_index)
X_extra = X.loc[indices_to_add].copy()
y_extra = y.loc[indices_to_add].copy() # these are actually real training examples
X = pd.concat([X, X_extra])
y_pred = pd.concat([y_pred, y_extra])
if self.sample_weight is not None:
w = pd.concat([w, w[indices_to_add]])
import os
import pandas as pd
from glob import glob
path1 = "/home/lsh/PycharmProjects/Audio/data/Male speech, man speaking"
path2 = "/home/lsh/PycharmProjects/Audio/data/Outside, rural or natural"
path3 = "/home/lsh/PycharmProjects/Audio/data/snoring"
path4 = "/home/lsh/PycharmProjects/Audio/data/Traffic noise, roadway noise"
path5 = "/home/lsh/PycharmProjects/Audio/data/Vehicle"
paths = [path1, path2, path3, path4, path5]
df = pd.DataFrame()
file_names = []
labels = []
for path in paths:
file_names += os.listdir(path)
if path == path3:
labels += [3] * len(os.listdir(path))
elif path == path1:
labels += [1] * len(os.listdir(path))
elif path == path2:
labels += [2] * len(os.listdir(path))
elif path == path4:
labels += [4] * len(os.listdir(path))
elif path == path5:
labels += [5] * len(os.listdir(path))
df["file_name"] = file_names
df["labels"] = labels
print(len(file_names), len(labels))
print(df.head())
df.to_csv("audio.csv", index=False)
df = pd.read_csv("audio.csv")
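# Illustrative alternative to the if/elif chain above (same result, assuming the same
# directory layout): map each path to its label once instead of branching per path.
label_map = {path1: 1, path2: 2, path3: 3, path4: 4, path5: 5}
alt_df = pd.DataFrame([(f, label_map[p]) for p in paths for f in os.listdir(p)],
columns=["file_name", "labels"])
print(alt_df.head())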
# -*- coding: utf-8 -*-
import pytest
import numpy as np
from pandas.types.dtypes import DatetimeTZDtype, PeriodDtype, CategoricalDtype
from pandas.types.common import pandas_dtype, is_dtype_equal
import pandas.util.testing as tm
class TestPandasDtype(tm.TestCase):
def test_numpy_dtype(self):
for dtype in ['M8[ns]', 'm8[ns]', 'object', 'float64', 'int64']:
self.assertEqual(pandas_dtype(dtype), np.dtype(dtype))
def test_numpy_string_dtype(self):
# do not parse freq-like string as period dtype
self.assertEqual(pandas_dtype('U'), np.dtype('U'))
self.assertEqual(pandas_dtype('S'), np.dtype('S'))
def test_datetimetz_dtype(self):
for dtype in ['datetime64[ns, US/Eastern]',
'datetime64[ns, Asia/Tokyo]',
'datetime64[ns, UTC]']:
self.assertIs(pandas_dtype(dtype), DatetimeTZDtype(dtype))
self.assertEqual(pandas_dtype(dtype), DatetimeTZDtype(dtype))
self.assertEqual(pandas_dtype(dtype), dtype)
def test_categorical_dtype(self):
self.assertEqual(pandas_dtype('category'), CategoricalDtype())
def test_period_dtype(self):
for dtype in ['period[D]', 'period[3M]', 'period[U]',
'Period[D]', 'Period[3M]', 'Period[U]']:
self.assertIs(pandas_dtype(dtype), PeriodDtype(dtype))
"""
This script compares different reference systems against each other.
"""
from pipeline.reference_loader import ZebrisReferenceLoader, OptogaitReferenceLoader
import matplotlib.pyplot as plt
import matplotlib
import pandas as pd
import numpy as np
from scipy import stats
import statsmodels.api as sm
from matplotlib.lines import Line2D
def reg_line(x, y):
"""
Calculate regression line for data.
Args:
x (list[float]): data x values
y (list[float]): data y values
Returns:
tuple(list[float], ...): x values of the regression line, y values of the regression line, calculated regression line parameters (gradient, intercept, r_value, p_value, std_err, rmse, mae), p-values of the statistical model, confidence interval
"""
gradient, intercept, r_value, p_value, std_err = stats.linregress(x, y)
# get p values and CI for the gradient and intercept
X = sm.add_constant(x)
model = sm.OLS(y, X)
results = model.fit()
pvalues = results.pvalues
conf_interval = results.conf_int(alpha=0.05, cols=None)
# print('p values:')
# print(results.pvalues)
# print('confidence intervals:')
# print(conf_interval)
# calculate RMSE (root mean squared error)
y_pred = gradient * x + intercept
rmse = np.sqrt(np.mean((y_pred - y) ** 2))
# make a regression line
mn = np.min(x)
mx = np.max(x + 0.5)
mn = 0
x1 = np.linspace(mn, mx, 500)
y1 = gradient * x1 + intercept
# summary line info
line_info = [
round(gradient, 4),
round(intercept, 4),
round(r_value, 4),
round(p_value, 4),
round(std_err, 4),
round(rmse, 4),
]
return x1, y1, line_info, pvalues, conf_interval
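# Hypothetical usage of reg_line (data invented): two systems that agree up to small
# noise; info holds [gradient, intercept, r, p, std_err, rmse].
_demo_x = np.linspace(0.8, 1.6, 50)
_demo_y = 1.02 * _demo_x + 0.01 + np.random.normal(scale=0.02, size=50)
_, _, _demo_info, _, _ = reg_line(_demo_x, _demo_y)
print(_demo_info)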
def detect_outlier(
data, column, reference_column, z_threshold=3, maximum_deviation=0.25
):
"""
Detect outliers based on z-score and maximum deviation.
Args:
data (DataFrame): DataFrame with the data
column (str): name of the column under consideration
reference_column (str): name of the reference column matching the data column
z_threshold (float): cutoff value for z-score
maximum_deviation (float): cutoff value for deviation between systems
Returns:
np.array: boolean vector indicating outliers
"""
column_diff = np.abs(data[column] - data[reference_column])
z_score_diff = np.abs(stats.zscore(column_diff))
# outlier = np.logical_or(z_score_diff > z_threshold, column_diff > maximum_deviation)
outlier = z_score_diff > z_threshold
return outlier
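# Hypothetical usage of detect_outlier (values invented): 19 strides where both systems
# agree closely and one stride with a gross mismatch; only the last row is flagged.
_demo = pd.DataFrame({'stride_length': [1.20] * 19 + [2.80],
'stride_length_ref': [1.21] * 19 + [1.22]})
print(detect_outlier(_demo, 'stride_length', 'stride_length_ref'))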
def draw_reg_line_and_info(data, outlier, column, reference_column, axis):
"""
Plot regression line and parameters.
Args:
data (DataFrame): DataFrame with actual data
outliers (np.array): Boolean array, indicating if the row is an outlier
column (str): name of the column under consideration
reference_column (str): name of the reference column matching the data column
axis (matplotlib.axis): axis to plot on
Returns:
None
"""
x1, y1, info, pvalues, conf_interval = reg_line(
data[np.logical_not(outlier)][column],
data[np.logical_not(outlier)][reference_column],
)
textstr = "\n".join(
(
r"$n=%i$" % (len(data[np.logical_not(outlier)]),),
r"$r=%.2f$" % (info[2],),
r"$RMSE=%.2f$" % (info[5],),
r"$y=%.2fx %+.2f$" % (info[0], info[1]),
)
)
props = dict(boxstyle="square", facecolor="white", edgecolor="white", alpha=0)
axis.text(
0.97,
0.03,
textstr,
fontsize=11,
transform=axis.transAxes,
verticalalignment="bottom",
horizontalalignment="right",
bbox=props,
)
axis.plot(x1, y1)
axis.plot(x1, x1, color="0.75")
if __name__ == "__main__":
config = {
"name": "manual_threshold",
# "raw_base_path": "./data/raw",
"raw_base_path": "./example_data_/raw",
# "interim_base_path": "./data/interim",
"interim_base_path": "./example_data/interim",
"overwrite": False,
# "dataset": "TRIPOD",
"dataset": "TRIPOD_excerpt",
"subjects": [
# "Sub_DF",
# "Sub_HA",
# "Sub_PB",
# "Sub_AL",
# "Sub_EN",
# "Sub_KP",
# "Sub_RW",
# "Sub_BK",
"Sub_FZ",
# "Sub_LU",
# "Sub_SN",
# "Sub_CP",
# "Sub_GK",
# "Sub_OD",
# "Sub_YU",
],
"runs": ["PWS", "PWS+20", "PWS-20"],
"experiment_duration": 120,
"plot_outlier": False,
}
plt.rcParams.update({"font.size": 12})
cmap = matplotlib.cm.get_cmap("jet")
norm = matplotlib.colors.Normalize(vmin=0.0, vmax=len(config["subjects"]) - 0.99)
all_data = pd.DataFrame()
fig_sl, ax_sl = plt.subplots() # stride length
fig_st, ax_st = plt.subplots() # stride time
fig_swt, ax_swt = plt.subplots() # swing time
fig_stt, ax_stt = plt.subplots() # stance time
fig_td, ax_td = plt.subplots() # timestamp difference
for subject_id, subject in enumerate(config["subjects"]):
for run_id, run in enumerate(config["runs"]):
zebris_data = ZebrisReferenceLoader(
config["name"],
config["raw_base_path"],
config["interim_base_path"],
config["dataset"],
subject,
run,
config["overwrite"],
).get_data()
optogait_data = OptogaitReferenceLoader(
config["name"],
config["raw_base_path"],
config["interim_base_path"],
config["dataset"],
subject,
run,
config["overwrite"],
).get_data()
merged = {"left": None, "right": None}
for side in ["left", "right"]:
zebris_data[side]["timestamp_zebris"] = zebris_data[side]["timestamp"]
merged[side] = pd.merge_asof(
left=optogait_data[side],
right=zebris_data[side],
on="timestamp",
direction="nearest",
tolerance=0.1,
allow_exact_matches=True,
).dropna()
merged[side] = merged[side][
merged[side]["timestamp"] < config["experiment_duration"]
]
merged[side]["subject"] = subject_id
merged[side]["run"] = run_id
all_data = pd.concat([all_data, merged[side]])
# ___________________________________________________________________________
#
# Prescient
# Copyright 2020 National Technology & Engineering Solutions of Sandia, LLC
# (NTESS). Under the terms of Contract DE-NA0003525 with NTESS, the U.S.
# Government retains certain rights in this software.
# This software is distributed under the Revised BSD License.
# ___________________________________________________________________________
"""
sources.py
This file will contain more general versions of data containers than
the sources defined in uncertainty_sources.py. The main class that this module
exports is the Source class which is intended to store data of any sort in
a dataframe. This class should not be modified (unless there is a bug) to be
made more specific; it should be subclassed. In addition, unless the
method will obviously change the state of the object, all methods should
produce new objects instead of modifying objects.
"""
import sys
import datetime
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import prescient.util.distributions.copula as copula
from prescient.util.distributions.distributions import UnivariateEmpiricalDistribution
from prescient.util.distributions.distributions import UnivariateEpiSplineDistribution
import prescient.gosm.derivative_patterns.derivative_patterns as dp
from prescient.gosm.markov_chains.states import State
from prescient.gosm.sources.segmenter import Criterion
power_sources = ['solar', 'wind', 'hydro']
recognized_sources = ['solar', 'wind', 'load', 'hydro']
# Default parameters for the non-required parameters for sources
defaults = {
'is_deterministic': False,
'frac_nondispatch': 1,
'scaling_factor': 1,
'forecasts_as_actuals': False,
'aggregate': False,
}
class Source:
"""
This class should act as a container for all the data related to a single
source. The data is stored internally in a Pandas Dataframe.
This class should have methods for segmentation (more generally pick
all datetimes that satisfy a certain a criterion) and selection.
Attributes:
name (str): The name of the source
data (pd.DataFrame): The internal dataframe storing all the data
source_type (str): The type of the source (wind, solar, load, etc.)
Args:
name (str): the name of the source
dataframe (pd.DataFrame): The frame containing all the data
source_type (str): the type of the source (e.g. 'solar')
"""
def __init__(self, name, dataframe, source_type):
self.name = name
self.data = dataframe
# A little validation is done here
# We check for duplicates
if dataframe.index.duplicated().any():
duplicates = dataframe.index[dataframe.index.duplicated()]
raise ValueError("Error: Source {} has duplicate datetimes at {}"
.format(name, ", ".join(map(str, duplicates))))
self.source_type = source_type.lower()
if source_type.lower() not in recognized_sources:
raise ValueError("The source type '{}' is unrecognized, the only "
"recognized sources are {}"
.format(source_type,
", ".join(recognized_sources)))
def check_for_column(self, column_name):
"""
This method will check if the source has a column with the name
specified. If it doesn't it will raise a RuntimeError.
Args:
column_name (str): The name of the column to check
"""
if column_name not in self.data.columns:
raise RuntimeError("Source {} has no '{}' column".format(
self.name, column_name))
def window(self, column_name, lower_bound=-np.inf, upper_bound=np.inf):
"""
Finds the window of data such that the column value is between
the two bounds specified. Returns a Source object with data
contained in the window. The bounds are inclusive.
Args:
column_name (str): The name of the column
lower_bound (float): The lower bound, if None, no lower bound
upper_bound (float): The upper bound, if None, no upper bound
Returns:
Source: The window of data
"""
self.check_for_column(column_name)
new_frame = self.data[(self.data[column_name] >= lower_bound) &
(self.data[column_name] <= upper_bound)]
return Source(self.name, new_frame, self.source_type)
def enumerate(self, column_name, value):
"""
Finds the window of data such that the column field is equal to the
value. Returns a Source object with the data contained in the window.
Args:
column_name (str): The name of the column
value: The value you want all datetimes to have in the new window
Returns:
Source: The data will have all rows which match value
"""
self.check_for_column(column_name)
new_frame = self.data[self.data[column_name] == value]
return Source(self.name, new_frame, self.source_type)
def rolling_window(self, day, historic_data_start=None,
historic_data_end=None):
"""
Creates a Rolling Window of data which contains a historic dataframe
and a dayahead dataframe. The historic data is all data up to the day
and the dayahead data is the data for that day.
Using non-datetime objects (pd.Timestamp, strings, np.datetime64)
probably works but not guaranteed. This is contingent on pandas
datetime indexing.
Args:
day (datetime.datetime): The datetime referring to hour zero of the
desired day to create a window up to that day
historic_data_start (datetime.datetime): The datetime of the start
of the historic data, if None just use start of data
historic_data_end (datetime.datetime): The datetime of the end of
the historic data, if None draws up to the day passed
Returns:
RollingWindow: The rolling window of data
"""
# If start not specified, we take the first date in dataframe
if historic_data_start is None:
historic_data_start = min(self.data.index)
# If end not specified, we take last date before the passed in day
if historic_data_end is None:
historic_data_end = day - datetime.timedelta(hours=1)
historic_frame = self.data[historic_data_start:historic_data_end]
dayahead_frame = self.data[day:day+datetime.timedelta(hours=23)]
# This suppresses warnings, This should return a copy anyways, so don't
# need a warning.
historic_frame.is_copy = False
dayahead_frame.is_copy = False
return RollingWindow(self.name, historic_frame,
self.source_type, dayahead_frame)
def solar_window(self, day, historic_data_start=None,
historic_data_end=None):
"""
Creates a SolarWindow of data which contains a historic dataframe
and a dayahead dataframe. The historic data is all data up to the day
and the dayahead data is the data for that day.
Using non-datetime objects (pd.Timestamp, strings, np.datetime64)
probably works but not guaranteed. This is contingent on pandas
datetime indexing.
Args:
day (datetime.datetime): The datetime referring to hour zero of the
desired day to create a window up to that day
historic_data_start (datetime.datetime): The datetime of the start
of the historic data, if None just use start of data
historic_data_end (datetime.datetime): The datetime of the end of
the historic data, if None draws up to the day passed
Returns:
SolarWindow: The rolling window of data
"""
window = self.rolling_window(day, historic_data_start,
historic_data_end)
return window.solar_window()
def add_column(self, column_name, series):
"""
Adds a column of data to the dataframe. This data should be indexed
by datetime.
Args:
column_name (str): The name of the column to add
series (pd.Series or dict[datetime.datetime,value]): The data
indexed by datetime to add to the dataset
"""
self.data[column_name] = pd.Series(series)
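# Standalone sketch of what add_column stores (dates and values invented): a dict keyed
# by datetimes becomes a datetime-indexed pandas Series when wrapped in pd.Series.
_demo_values = {datetime.datetime(2020, 1, 1, h): float(h) for h in range(3)}
print(pd.Series(_demo_values))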
def get_day_of_data(self, column_name, day):
"""
This function returns a pandas Series of all the data in the column
with the specific day as an index.
Args:
column_name (str): The desired column
day (datetime-like): the day, which can be coerced into
a pd.Timestamp
Returns:
pd.Series: A series of relevant data
"""
self.check_for_column(column_name)
dt = pd.Timestamp(day)
column = self.data[column_name]
return column[column.index.date == dt.date()]
def get(self, column_name, row_index):
"""
Get the value stored in column specified by column_name and the row
specified by the row_index
Args:
column_name (str): The name of the column
row_index (datetime.datetime): The datetime for which you want data
"""
self.check_for_column(column_name)
return self.data[column_name][row_index]
def get_column(self, column_name):
"""
Returns the column of data with that column name. This will also return
a column without any nan values.
Args:
column_name (str): The name of the column
Returns:
pd.Series: The requested column
"""
self.check_for_column(column_name)
return self.data[column_name].dropna()
def get_state_walk(self, state_description, class_=State):
"""
This method should walk through the datetimes and construct a sequence
of the different states of the historic data. The specification for
what constitutes a state is passed in the state_description argument.
Args:
state_description (StateDescription): Specification for determining
what the state for each datetime is
class_ (Class): The type of state you wish to instantiate
Returns:
A dictionary of mapping datetimes to states constituting the walk
"""
states = {}
names = state_description.keys()
for dt in self.data.index:
name_value_mapping = {name: self.get(name, dt) for name in names}
states[dt] = state_description.to_state(class_,
**name_value_mapping)
return states
def get_state(self, state_description, dt, class_=State):
"""
This method should create the state for a specific datetime.
The specification for what constitutes a state is passed in
the state_description argument.
Args:
state_description (StateDescription): Specification for determining
what the state for each datetime is
dt (datetime.datetime): The relevant datetime
class_ (Class): The type of state you wish to instantiate
Returns:
State: The state of the datetime
"""
dt = pd.Timestamp(dt)
# AUTOGENERATED! DO NOT EDIT! File to edit: utilities.ipynb (unless otherwise specified).
__all__ = ['make_codes', 'make_data', 'get_rows', 'extract_codes', 'Info', 'memory', 'listify', 'reverse_dict',
'del_dot', 'del_zero', 'expand_hyphen', 'expand_star', 'expand_colon', 'expand_regex', 'expand_code',
'expand_columns', 'format_codes', 'insert_external', 'unique', 'count_codes', 'lookup_codes', 'get_codes']
# Cell
import re
import numpy as np
import pandas as pd
from functools import singledispatch
# Cell
def make_codes(n=100, letters=26, numbers=100, seed=False):
"""
Generate a dataframe with a column of random codes
Args:
letters (int): The number of different letters to use
numbers (int): The number of different numbers to use
Returns
A dataframe with a column with one or more codes in the rows
"""
# each code is assumed to consist of a letter and a number
alphabet = list('abcdefghigjklmnopqrstuvwxyz')
letters=alphabet[:letters+1]
# make random numbers same if seed is specified
if seed:
np.random.seed(0)
# determine the number of codes to be drawn for each event
n_codes=np.random.negative_binomial(1, p=0.3, size=n)
# avoid zero (all events have to have at least one code)
n_codes=n_codes+1
# for each event, randomly generate a the number of codes specified by n_codes
codes=[]
for i in n_codes:
diag = [np.random.choice(letters).upper()+
str(int(np.random.uniform(low=1, high=numbers)))
for num in range(i)]
code_string=','.join(diag)
codes.append(code_string)
# create a dataframe based on the list
df=pd.DataFrame(codes)
df.columns=['code']
return df
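# Hypothetical usage of make_codes (arguments invented): five rows of comma-separated
# codes, reproducible because seed=True fixes numpy's random state.
demo_codes = make_codes(n=5, letters=3, numbers=10, seed=True)
print(demo_codes)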
# Cell
def make_data(n=100, letters=26, numbers=100, seed=False, expand=False,
columns=['pid', 'gender', 'birth_date', 'date', 'region', 'codes']):
"""
Generate a dataframe with a column of random codes
Args:
letters (int): The number of different letters to use
numbers (int): The number of different numbers to use
Returns
A dataframe with a column with one or more codes in the rows
Examples
>>>df = make_data(n=100, letters=5, numbers=5, seed=True)
"""
if seed:
np.random.seed(seed=seed)
pid = range(n)
df_person=pd.DataFrame(index = pid)
#female = np.random.binomial(1, 0.5, size =n)
gender = np.random.choice(['male', 'female'], size=n)
region = np.random.choice(['north', 'south', 'east', 'west'], size=n)
birth_year = np.random.randint(1920, 2019, size=n)
birth_month = np.random.randint(1,12, size=n)
birth_day = np.random.randint(1,28, size=n) # ok, I know!
events_per_year = np.random.poisson(1, size=n)
years = 2020 - birth_year
events = years * events_per_year
events = np.where(events==0,1,events)
events = events.astype(int)
all_codes=[]
codes = [all_codes.extend(make_codes(n=n, letters=letters,
numbers=numbers,
seed=seed)['code'].tolist())
for n in events]
days_alive = (2020 - birth_year) *365
days_and_events = zip(days_alive.tolist(), events.tolist())
all_days=[]
days_after_birth = [all_days.extend(np.random.randint(0, max_day, size=n)) for max_day, n in days_and_events]
pid_and_events = zip(list(pid), events.tolist())
all_pids=[]
pids = [all_pids.extend([p+1]*e) for p, e in pid_and_events]
df_events = pd.DataFrame(index=all_pids)
df_events['codes'] = all_codes
df_events['days_after'] = all_days
#df_person['female'] = female
df_person['gender'] = gender
df_person['region'] = region
df_person['year'] = birth_year
df_person['month'] = birth_month
df_person['day'] = birth_day
df = df_events.merge(df_person, left_index=True, right_index=True)
df['birth_date'] = pd.to_datetime(df[['year', 'month', 'day']])
df['date'] = df['birth_date'] + pd.to_timedelta(df.days_after, unit='d')
import itertools
from typing import Dict, List, Set, Sequence
import pandas as pd
from sudoku_solver.board.houses import Row, Column, Block, House, HouseType
from sudoku_solver.board.cell import Cell
from sudoku_solver.board.observable import SudokuObservable
from sudoku_solver.board.board_constants import HORIZONTAL_INDEXES, VERTICAL_INDEXES
from sudoku_solver.shared.puzzle import SudokuPuzzle
class Board(SudokuObservable):
def __init__(self, prefilled_values: SudokuPuzzle):
super().__init__()
# make sure they're sorted
self.rows: Dict[int, Row] = {y: Row(y=y) for y in range(1, 10)}
self.columns: Dict[int, Column] = {x: Column(x=x) for x in range(1, 10)}
self.blocks: Dict[int, Block] = {z: Block(z=z) for z in range(1, 10)}
self._initialize_cells(prefilled_values=prefilled_values)
def _initialize_cells(self, prefilled_values: SudokuPuzzle):
# loop at all cell positions and instantiate all cells
cells = []
for position in itertools.product(HORIZONTAL_INDEXES, VERTICAL_INDEXES):
cell = Cell(x=position[1],
y=position[0],
prefilled_value=prefilled_values.get(x=position[1], y=position[0]))
cells.append(cell)
# fill cells into their respective row, column, and block
# also attach the container to the cell for cross-navigation
for cell in cells:
self.rows[cell.y].initialize_cell(cell)
self.columns[cell.x].initialize_cell(cell)
self.blocks[cell.z].initialize_cell(cell)
assert len(self.rows) == 9
assert len(self.columns) == 9
assert len(self.blocks) == 9
def get_cell(self, y: int, x: int) -> Cell:
all_cells = self.get_cells()
cells = [c for c in all_cells if c.x == x and c.y == y]
assert len(cells) == 1
return cells[0]
def substribe_observer_to_cells(self,
observer_finishing_value: callable,
observer_invalidating_candidate: callable):
"""subscribe an observer (GUI) to all cells. They will emit changes to candidates
and invalidated candidates"""
for cell in self.get_cells():
cell.subscribe_to_finished_values(observer=observer_finishing_value)
cell.subscribe_to_invalidated_candidates(observer=observer_invalidating_candidate)
def as_df(self, mode='candidates') -> pd.DataFrame:
series = {}
for x, column in self.columns.items():
series[x] = column.as_series(mode=mode)
        return pd.DataFrame(data=series)
# -*- coding: utf-8 -*-
import numpy
import pandas
import matplotlib.pyplot as plt
from sklearn import preprocessing
from keras.models import Sequential
from keras.layers.core import Dense, Activation
from keras.layers.recurrent import LSTM
class Prediction :
def __init__(self):
self.length_of_sequences = 10
self.in_out_neurons = 1
self.hidden_neurons = 300
def load_data(self, data, n_prev=10):
X, Y = [], []
for i in range(len(data) - n_prev):
            X.append(data.iloc[i:(i+n_prev)].values)
            Y.append(data.iloc[i+n_prev].values)
retX = numpy.array(X)
retY = numpy.array(Y)
return retX, retY
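
    # Worked example (editor's note, not part of the original script): with a
    # single 'close' column of 12 rows and n_prev=10, load_data returns X of
    # shape (2, 10, 1) -- the windows rows 0-9 and 1-10 -- and Y of shape (2, 1),
    # the closing prices at rows 10 and 11. Each 10-step window is paired with
    # the next value as the prediction target.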
def create_model(self) :
model = Sequential()
model.add(LSTM(self.hidden_neurons,
batch_input_shape=(None, self.length_of_sequences, self.in_out_neurons),
return_sequences=False))
model.add(Dense(self.in_out_neurons))
model.add(Activation("linear"))
model.compile(loss="mape", optimizer="adam")
return model
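
    # Editor's note: batch_input_shape=(None, 10, 1) means any batch size,
    # length_of_sequences=10 timesteps and in_out_neurons=1 feature per step.
    # With return_sequences=False the LSTM emits only its final hidden state,
    # which the Dense layer maps to a single linear output trained on MAPE loss.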
def train(self, X_train, y_train) :
model = self.create_model()
        # Train the model
        model.fit(X_train, y_train, batch_size=10, epochs=100)
return model
if __name__ == "__main__":
prediction = Prediction()
    # Prepare the data
data = None
brand_num = 1332
for year in range(2014, 2018):
data_ = pandas.read_csv('../stock_data/adopted_nikkeiheikin/{}/only_closing_stock_data_with_date/only_closing_stock_data_with_date_{}.csv'.format(year, brand_num))
#data_ = pandas.read_csv('csv/indices_I101_1d_' + str(year) + '.csv')
data = data_ if (data is None) else pandas.concat([data, data_])
#data.columns = ['date', 'close']
data['date'] = pandas.to_datetime(data['date'])
    # Standardize the closing price data
data['close'] = preprocessing.scale(data['closing price'])
data = data.sort_values(by='date')
data = data.reset_index(drop=True)
data = data.loc[:, ['date', 'close']]
    # Hold out the tail of the series as test data (split at 90%)
split_pos = int(len(data) * 0.9)
x_train, y_train = prediction.load_data(data[['close']].iloc[0:split_pos], prediction.length_of_sequences)
x_test, y_test = prediction.load_data(data[['close']].iloc[split_pos:], prediction.length_of_sequences)
print("a : {}\nb : {}\nc : {}\nd : {}".format(x_train, y_train, x_test, y_test))
exit()
model = prediction.train(x_train, y_train)
predicted = model.predict(x_test)
    result = pandas.DataFrame(predicted)
'''
Multi-agent anomaly detection module
'''
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers.core import Dense
from tensorflow.keras import optimizers
from keras import backend as K
import json
from sklearn.utils import shuffle
import os
import sys
import time
'''
Data class processing
'''
class data_cls:
def __init__(self,train_test,**kwargs):
col_names = ["duration","protocol_type","service","flag","src_bytes",
"dst_bytes","land_f","wrong_fragment","urgent","hot","num_failed_logins",
"logged_in","num_compromised","root_shell","su_attempted","num_root",
"num_file_creations","num_shells","num_access_files","num_outbound_cmds",
"is_host_login","is_guest_login","count","srv_count","serror_rate",
"srv_serror_rate","rerror_rate","srv_rerror_rate","same_srv_rate",
"diff_srv_rate","srv_diff_host_rate","dst_host_count","dst_host_srv_count",
"dst_host_same_srv_rate","dst_host_diff_srv_rate","dst_host_same_src_port_rate",
"dst_host_srv_diff_host_rate","dst_host_serror_rate","dst_host_srv_serror_rate",
"dst_host_rerror_rate","dst_host_srv_rerror_rate","labels","dificulty"]
self.index = 0
# Data formated path and test path.
self.loaded = False
self.train_test = train_test
self.train_path = kwargs.get('train_path', '../../datasets/NSL/KDDTrain+.txt')
self.test_path = kwargs.get('test_path','../../datasets/NSL/KDDTest+.txt')
self.formated_train_path = kwargs.get('formated_train_path',
"../../datasets/formated/formated_train_adv.data")
self.formated_test_path = kwargs.get('formated_test_path',
"../../datasets/formated/formated_test_adv.data")
self.attack_types = ['normal','DoS','Probe','R2L','U2R']
self.attack_names = []
self.attack_map = { 'normal': 'normal',
'back': 'DoS',
'land': 'DoS',
'neptune': 'DoS',
'pod': 'DoS',
'smurf': 'DoS',
'teardrop': 'DoS',
'mailbomb': 'DoS',
'apache2': 'DoS',
'processtable': 'DoS',
'udpstorm': 'DoS',
'ipsweep': 'Probe',
'nmap': 'Probe',
'portsweep': 'Probe',
'satan': 'Probe',
'mscan': 'Probe',
'saint': 'Probe',
'ftp_write': 'R2L',
                            'guess_passwd': 'R2L',
'imap': 'R2L',
'multihop': 'R2L',
'phf': 'R2L',
'spy': 'R2L',
'warezclient': 'R2L',
'warezmaster': 'R2L',
'sendmail': 'R2L',
'named': 'R2L',
'snmpgetattack': 'R2L',
'snmpguess': 'R2L',
'xlock': 'R2L',
'xsnoop': 'R2L',
'worm': 'R2L',
'buffer_overflow': 'U2R',
'loadmodule': 'U2R',
'perl': 'U2R',
'rootkit': 'U2R',
'httptunnel': 'U2R',
'ps': 'U2R',
'sqlattack': 'U2R',
'xterm': 'U2R'
}
self.all_attack_names = list(self.attack_map.keys())
formated = False
# Test formated data exists
if os.path.exists(self.formated_train_path) and os.path.exists(self.formated_test_path):
formated = True
self.formated_dir = "../../datasets/formated/"
if not os.path.exists(self.formated_dir):
os.makedirs(self.formated_dir)
# If it does not exist, it's needed to format the data
if not formated:
            ''' Formatting the dataset for ready-to-use data '''
self.df = pd.read_csv(self.train_path,sep=',',names=col_names,index_col=False)
if 'dificulty' in self.df.columns:
self.df.drop('dificulty', axis=1, inplace=True) #in case of difficulty
data2 = pd.read_csv(self.test_path,sep=',',names=col_names,index_col=False)
if 'dificulty' in data2:
del(data2['dificulty'])
train_indx = self.df.shape[0]
frames = [self.df,data2]
self.df = pd.concat(frames)
# Dataframe processing
self.df = pd.concat([self.df.drop('protocol_type', axis=1), pd.get_dummies(self.df['protocol_type'])], axis=1)
self.df = pd.concat([self.df.drop('service', axis=1), pd.get_dummies(self.df['service'])], axis=1)
self.df = pd.concat([self.df.drop('flag', axis=1), pd.get_dummies(self.df['flag'])], axis=1)
# 1 if ``su root'' command attempted; 0 otherwise
self.df['su_attempted'] = self.df['su_attempted'].replace(2.0, 0.0)
# One hot encoding for labels
self.df = pd.concat([self.df.drop('labels', axis=1),
pd.get_dummies(self.df['labels'])], axis=1)
# Normalization of the df
#normalized_df=(df-df.mean())/df.std()
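            # Min-max scaling per numeric column: x' = (x - min) / (max - min),
            # which maps values into [0, 1]; all-zero columns are left at 0 to avoid 0/0.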
for indx,dtype in self.df.dtypes.iteritems():
if dtype == 'float64' or dtype == 'int64':
if self.df[indx].max() == 0 and self.df[indx].min()== 0:
self.df[indx] = 0
else:
self.df[indx] = (self.df[indx]-self.df[indx].min())/(self.df[indx].max()-self.df[indx].min())
# Save data
test_df = self.df.iloc[train_indx:self.df.shape[0]]
test_df = shuffle(test_df,random_state=np.random.randint(0,100))
self.df = self.df[:train_indx]
self.df = shuffle(self.df,random_state=np.random.randint(0,100))
test_df.to_csv(self.formated_test_path,sep=',',index=False)
self.df.to_csv(self.formated_train_path,sep=',',index=False)
# Create a list with the existent attacks in the df
for att in self.attack_map:
if att in self.df.columns:
# Add only if there is exist at least 1
if np.sum(self.df[att].values) > 1:
self.attack_names.append(att)
def get_shape(self):
if self.loaded is False:
self._load_df()
self.data_shape = self.df.shape
# stata + labels
return self.data_shape
''' Get n-rows from loaded data
The dataset must be loaded in RAM
'''
def get_batch(self,batch_size=100):
if self.loaded is False:
self._load_df()
# Read the df rows
indexes = list(range(self.index,self.index+batch_size))
if max(indexes)>self.data_shape[0]-1:
dif = max(indexes)-self.data_shape[0]
indexes[len(indexes)-dif-1:len(indexes)] = list(range(dif+1))
self.index=batch_size-dif
batch = self.df.iloc[indexes]
else:
batch = self.df.iloc[indexes]
self.index += batch_size
labels = batch[self.attack_names]
batch = batch.drop(self.all_attack_names,axis=1)
return batch,labels
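
    # Worked example (editor's note): with data_shape[0] == 10, index == 8 and
    # batch_size == 5, the requested indexes 8..12 run past the end, so the
    # batch wraps around to rows [8, 9, 0, 1, 2] and self.index restarts at 3.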
def get_full(self):
if self.loaded is False:
self._load_df()
labels = self.df[self.attack_names]
batch = self.df.drop(self.all_attack_names,axis=1)
return batch,labels
''' Get n-row batch from the dataset
Return: df = n-rows
labels = correct labels for detection
Sequential for largest datasets
'''
# def get_sequential_batch(self, batch_size=100):
# if self.loaded is False:
# self.df = pd.read_csv(self.formated_path,sep=',', nrows = batch_size)
# self.loaded = True
# else:
# self.df = pd.read_csv(self.formated_path,sep=',', nrows = batch_size,
# skiprows = self.index)
#
# self.index += batch_size
#
# labels = self.df[self.attack_types]
# for att in self.attack_names:
# if att in self.df.columns:
# del(self.df[att])
# return self.df,labels
def _load_df(self):
if self.train_test == 'train':
self.df = pd.read_csv(self.formated_train_path,sep=',') # Read again the csv
else:
            self.df = pd.read_csv(self.formated_test_path,sep=',')
"""
Copyright 2020 Samsung SDS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from brightics.common.repr import BrtcReprBuilder, strip_margin, pandasDF2MD, dict2MD
from brightics.function.utils import _model_dict
from brightics.common.utils import check_required_parameters
from brightics.common.utils import get_default_from_parameters_if_required
from brightics.common.validation import validate, greater_than_or_equal_to
from brightics.common.exception import BrighticsFunctionException
from sklearn.preprocessing import normalize
import numpy as np
import pandas as pd
from .short_text_topic_modeling_gsdmm import gsdmm_rwalk
import functools
import pyLDAvis
def gsdmm(table, **params):
check_required_parameters(_gsdmm, params, ['table'])
params = get_default_from_parameters_if_required(params, _gsdmm)
param_validation_check = [greater_than_or_equal_to(params, 2, 'K'),
greater_than_or_equal_to(params, 0.0, 'alpha'),
greater_than_or_equal_to(params, 0.0, 'beta'),
greater_than_or_equal_to(params, 1, 'max_iter'),
greater_than_or_equal_to(params, 1, 'num_topic_words')]
validate(*param_validation_check)
return _gsdmm(table, **params)
def _count_to_ratio_raw(word_count):
if not word_count:
return {}
else:
word_count_list = word_count.items()
words = [pair[0] for pair in word_count_list]
counts = [[pair[1]] for pair in word_count_list]
counts_normalized = normalize(counts, norm='l1', axis=0)
word_ratio_raw = {word: ratio[0] for word, ratio in zip(words, counts_normalized)}
return word_ratio_raw
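
# Worked example (editor's note): _count_to_ratio_raw({'movie': 3, 'film': 1})
# returns {'movie': 0.75, 'film': 0.25} -- the counts L1-normalized into ratios.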
def _gen_table(word_ratio_raw, num_topic_words):
if not word_ratio_raw:
return [""]
else:
word_ratio_sorted = sorted(word_ratio_raw.items(), key=lambda item: item[1], reverse=True)
word_ratio = [["{}: {}".format(word, ratio), word, ratio] for word, ratio in word_ratio_sorted]
return np.transpose(word_ratio[:num_topic_words]).tolist()
def _gsdmm(table, input_col, topic_name='topic', K=10, alpha=0.1, beta=0.1, max_iter=50, num_topic_words=3):
docs = np.array(table[input_col])
docs_set = [set(doc) for doc in docs]
docs_preprocessed = [list(doc_set) for doc_set in docs_set]
vocab_set = list(set.union(*docs_set))
vocab_size = len(vocab_set)
# initialize and train a GSDMM model
mgp = gsdmm_rwalk.MovieGroupProcess(K=K, alpha=alpha, beta=beta, n_iters=max_iter)
topics = mgp.fit(docs_preprocessed, vocab_size)
# generate topic table
topic_word_count = mgp.cluster_word_distribution
topic_words_raw = [[ind, _count_to_ratio_raw(word_count)]
for ind, word_count in enumerate(topic_word_count) if word_count]
topic_words = [[item[0]] + _gen_table(item[1], num_topic_words) for item in topic_words_raw]
# reset topic ids
nonempty_topic_indices = [item[0] for item in topic_words]
reset_topic_ind = {old_ind: (new_ind + 1) for new_ind, old_ind in enumerate(nonempty_topic_indices)}
topics = [reset_topic_ind[old_ind] for old_ind in topics]
topic_words = [[reset_topic_ind[old_item[0]]] + old_item[1:] for old_item in topic_words]
# generate output dataframes
out_table = pd.DataFrame.copy(table, deep=True)
if topic_name in table.columns:
raise BrighticsFunctionException.from_errors(
[{'0100': "Existing table contains the topic column name. Please choose another name."}])
out_table[topic_name] = topics
columns = ['index', 'vocabularies_weights', 'vocabularies', 'weights']
    topic_table = pd.DataFrame(topic_words, columns=columns)
import pandas as pd
from sklearn.model_selection import train_test_split
print('Preprocessing csvs...')
# load data
path = 'data/{}_paired.tsv'
train_path = path.format('train')
val_path = path.format('dev')
test_path = path.format('test')
train_df = pd.read_table(train_path)
val_df = pd.read_table(val_path)
import pandas as pd
import numpy as np
import requests
import warnings
import scipy as sp
from scipy import stats
try:
import sklearn
except ImportError:
sklearn = False
else:
from sklearn.decomposition import PCA
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from . import filters, process
from .utils import get_protein_id
def correlation(df, rowvar=False):
"""
Calculate column-wise Pearson correlations using ``numpy.ma.corrcoef``
Input data is masked to ignore NaNs when calculating correlations. Data is returned as
a Pandas ``DataFrame`` of column_n x column_n dimensions, with column index copied to
both axes.
:param df: Pandas DataFrame
:return: Pandas DataFrame (n_columns x n_columns) of column-wise correlations
"""
# Create a correlation matrix for all correlations
# of the columns (filled with na for all values)
df = df.copy()
maskv = np.ma.masked_where(np.isnan(df.values), df.values)
cdf = np.ma.corrcoef(maskv, rowvar=False)
cdf = pd.DataFrame(np.array(cdf))
cdf.columns = df.columns
cdf.index = df.columns
cdf = cdf.sort_index(level=0, axis=1)
cdf = cdf.sort_index(level=0)
return cdf
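
# Example (editor's sketch): column-wise correlations that ignore NaNs.
#   df = pd.DataFrame({'a': [1.0, 2.0, np.nan, 4.0], 'b': [2.0, 4.1, 6.0, 8.2]})
#   correlation(df)   # 2x2 DataFrame indexed and columned by ['a', 'b']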
def pca(df, n_components=2, mean_center=False, **kwargs):
"""
Principal Component Analysis, based on `sklearn.decomposition.PCA`
Performs a principal component analysis (PCA) on the supplied dataframe, selecting the first ``n_components`` components
in the resulting model. The model scores and weights are returned.
For more information on PCA and the algorithm used, see the `scikit-learn documentation <http://scikit-learn.org/stable/modules/generated/sklearn.decomposition.PCA.html>`_.
:param df: Pandas ``DataFrame`` to perform the analysis on
:param n_components: ``int`` number of components to select
:param mean_center: ``bool`` mean center the data before performing PCA
:param kwargs: additional keyword arguments to `sklearn.decomposition.PCA`
:return: scores ``DataFrame`` of PCA scores n_components x n_samples
weights ``DataFrame`` of PCA weights n_variables x n_components
"""
if not sklearn:
        raise ImportError('This library depends on scikit-learn (sklearn) to perform PCA analysis')
from sklearn.decomposition import PCA
df = df.copy()
# We have to zero fill, nan errors in PCA
df[ np.isnan(df) ] = 0
if mean_center:
mean = np.mean(df.values, axis=0)
df = df - mean
pca = PCA(n_components=n_components, **kwargs)
pca.fit(df.values.T)
scores = pd.DataFrame(pca.transform(df.values.T)).T
scores.index = ['Principal Component %d (%.2f%%)' % ( (n+1), pca.explained_variance_ratio_[n]*100 ) for n in range(0, scores.shape[0])]
scores.columns = df.columns
weights = pd.DataFrame(pca.components_).T
weights.index = df.index
weights.columns = ['Weights on Principal Component %d' % (n+1) for n in range(0, weights.shape[1])]
return scores, weights
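
# Example (editor's sketch): for a samples-in-columns DataFrame `df`,
#   scores, weights = pca(df, n_components=2, mean_center=True)
# returns `scores` as 2 x n_samples (rows labelled with explained variance)
# and `weights` as n_variables x 2.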
def plsda(df, a, b, n_components=2, mean_center=False, scale=True, **kwargs):
"""
Partial Least Squares Discriminant Analysis, based on `sklearn.cross_decomposition.PLSRegression`
Performs a binary group partial least squares discriminant analysis (PLS-DA) on the supplied
dataframe, selecting the first ``n_components``.
Sample groups are defined by the selectors ``a`` and ``b`` which are used to select columns
from the supplied dataframe. The result model is applied to the entire dataset,
projecting non-selected samples into the same space.
For more information on PLS regression and the algorithm used, see the `scikit-learn documentation <http://scikit-learn.org/stable/modules/generated/sklearn.cross_decomposition.PLSRegression.html>`_.
:param df: Pandas ``DataFrame`` to perform the analysis on
:param a: Column selector for group a
:param b: Column selector for group b
:param n_components: ``int`` number of components to select
:param mean_center: ``bool`` mean center the data before performing PLS regression
:param kwargs: additional keyword arguments to `sklearn.cross_decomposition.PLSRegression`
:return: scores ``DataFrame`` of PLSDA scores n_components x n_samples
weights ``DataFrame`` of PLSDA weights n_variables x n_components
"""
if not sklearn:
        raise ImportError('This library depends on scikit-learn (sklearn) to perform PLS-DA')
from sklearn.cross_decomposition import PLSRegression
df = df.copy()
# We have to zero fill, nan errors in PLSRegression
df[ np.isnan(df) ] = 0
if mean_center:
mean = np.mean(df.values, axis=0)
df = df - mean
sxa, _ = df.columns.get_loc_level(a)
sxb, _ = df.columns.get_loc_level(b)
dfa = df.iloc[:, sxa]
dfb = df.iloc[:, sxb]
dff = pd.concat([dfa, dfb], axis=1)
y = np.ones(dff.shape[1])
y[np.arange(dfa.shape[1])] = 0
plsr = PLSRegression(n_components=n_components, scale=scale, **kwargs)
plsr.fit(dff.values.T, y)
# Apply the generated model to the original data
x_scores = plsr.transform(df.values.T)
scores = pd.DataFrame(x_scores.T)
scores.index = ['Latent Variable %d' % (n+1) for n in range(0, scores.shape[0])]
scores.columns = df.columns
weights = pd.DataFrame(plsr.x_weights_)
weights.index = df.index
weights.columns = ['Weights on Latent Variable %d' % (n+1) for n in range(0, weights.shape[1])]
loadings = pd.DataFrame(plsr.x_loadings_)
loadings.index = df.index
loadings.columns = ['Loadings on Latent Variable %d' % (n+1) for n in range(0, loadings.shape[1])]
return scores, weights, loadings
def plsr(df, v, n_components=2, mean_center=False, scale=True, **kwargs):
"""
Partial Least Squares Regression Analysis, based on `sklearn.cross_decomposition.PLSRegression`
Performs a partial least squares regression (PLS-R) on the supplied dataframe ``df``
against the provided continuous variable ``v``, selecting the first ``n_components``.
For more information on PLS regression and the algorithm used, see the `scikit-learn documentation <http://scikit-learn.org/stable/modules/generated/sklearn.cross_decomposition.PLSRegression.html>`_.
:param df: Pandas ``DataFrame`` to perform the analysis on
:param v: Continuous variable to perform regression against
:param n_components: ``int`` number of components to select
:param mean_center: ``bool`` mean center the data before performing PLS regression
:param kwargs: additional keyword arguments to `sklearn.cross_decomposition.PLSRegression`
:return: scores ``DataFrame`` of PLS-R scores n_components x n_samples
weights ``DataFrame`` of PLS-R weights n_variables x n_components
"""
if not sklearn:
        raise ImportError('This library depends on scikit-learn (sklearn) to perform PLS-DA')
from sklearn.cross_decomposition import PLSRegression
df = df.copy()
# We have to zero fill, nan errors in PLSRegression
df[ np.isnan(df) ] = 0
if mean_center:
mean = np.mean(df.values, axis=0)
df = df - mean
#TODO: Extract values if v is DataFrame?
plsr = PLSRegression(n_components=n_components, scale=scale, **kwargs)
plsr.fit(df.values.T, v)
scores = pd.DataFrame(plsr.x_scores_.T)
scores.index = ['Latent Variable %d' % (n+1) for n in range(0, scores.shape[0])]
scores.columns = df.columns
weights = pd.DataFrame(plsr.x_weights_)
weights.index = df.index
weights.columns = ['Weights on Latent Variable %d' % (n+1) for n in range(0, weights.shape[1])]
loadings = pd.DataFrame(plsr.x_loadings_)
loadings.index = df.index
loadings.columns = ['Loadings on Latent Variable %d' % (n+1) for n in range(0, loadings.shape[1])]
#r = plsr.score(df.values.T, v)
predicted = plsr.predict(df.values.T)
return scores, weights, loadings, predicted
def _non_zero_sum(df):
# Following is just to build the template; actual calculate below
dfo = df.sum(axis=0, level=0)
for c in df.columns.values:
dft = df[c]
dfo[c] = dft[ dft > 0].sum(axis=0, level=0)
return dfo
def enrichment_from_evidence(dfe, modification="Phospho (STY)"):
"""
Calculate relative enrichment of peptide modifications from evidence.txt.
Taking a modifiedsitepeptides ``DataFrame`` returns the relative enrichment of the specified
modification in the table.
The returned data columns are generated from the input data columns.
:param df: Pandas ``DataFrame`` of evidence
:return: Pandas ``DataFrame`` of percentage modifications in the supplied data.
"""
dfe = dfe.reset_index().set_index('Experiment')
dfe['Modifications'] = np.array([modification in m for m in dfe['Modifications']])
dfe = dfe.set_index('Modifications', append=True)
dfes = dfe.sum(axis=0, level=[0,1]).T
columns = dfes.sum(axis=1, level=0).columns
total = dfes.sum(axis=1, level=0).values.flatten() # Total values
modified = dfes.iloc[0, dfes.columns.get_level_values('Modifications').values ].values # Modified
enrichment = modified / total
return pd.DataFrame([enrichment], columns=columns, index=['% Enrichment'])
def enrichment_from_msp(dfmsp, modification="Phospho (STY)"):
"""
Calculate relative enrichment of peptide modifications from modificationSpecificPeptides.txt.
Taking a modifiedsitepeptides ``DataFrame`` returns the relative enrichment of the specified
modification in the table.
The returned data columns are generated from the input data columns.
:param df: Pandas ``DataFrame`` of modificationSpecificPeptides
:return: Pandas ``DataFrame`` of percentage modifications in the supplied data.
"""
dfmsp['Modifications'] = np.array([modification in m for m in dfmsp['Modifications']])
dfmsp = dfmsp.set_index(['Modifications'])
dfmsp = dfmsp.filter(regex='Intensity ')
dfmsp[ dfmsp == 0] = np.nan
df_r = dfmsp.sum(axis=0, level=0)
modified = df_r.loc[True].values
total = df_r.sum(axis=0).values
enrichment = modified / total
return pd.DataFrame([enrichment], columns=dfmsp.columns, index=['% Enrichment'])
def sitespeptidesproteins(df, site_localization_probability=0.75):
"""
Generate summary count of modified sites, peptides and proteins in a processed dataset ``DataFrame``.
Returns the number of sites, peptides and proteins as calculated as follows:
- `sites` (>0.75; or specified site localization probability) count of all sites > threshold
- `peptides` the set of `Sequence windows` in the dataset (unique peptides)
- `proteins` the set of unique leading proteins in the dataset
:param df: Pandas ``DataFrame`` of processed data
:param site_localization_probability: ``float`` site localization probability threshold (for sites calculation)
:return: ``tuple`` of ``int``, containing sites, peptides, proteins
"""
sites = filters.filter_localization_probability(df, site_localization_probability)['Sequence window']
peptides = set(df['Sequence window'])
proteins = set([str(p).split(';')[0] for p in df['Proteins']])
return len(sites), len(peptides), len(proteins)
def modifiedaminoacids(df):
"""
Calculate the number of modified amino acids in supplied ``DataFrame``.
Returns the total of all modifications and the total for each amino acid individually, as an ``int`` and a
``dict`` of ``int``, keyed by amino acid, respectively.
:param df: Pandas ``DataFrame`` containing processed data.
:return: total_aas ``int`` the total number of all modified amino acids
quants ``dict`` of ``int`` keyed by amino acid, giving individual counts for each aa.
"""
amino_acids = list(df['Amino acid'].values)
aas = set(amino_acids)
quants = {}
for aa in aas:
quants[aa] = amino_acids.count(aa)
total_aas = len(amino_acids)
return total_aas, quants
def go_enrichment(df, enrichment='function', organism='Homo sapiens', summary=True, fdr=0.05, ids_from=['Proteins','Protein IDs']):
"""
Calculate gene ontology (GO) enrichment for a specified set of indices, using the PantherDB GO enrichment service.
Provided with a processed data ``DataFrame`` will calculate the GO ontology enrichment specified by `enrichment`,
for the specified `organism`. The IDs to use for genes are taken from the field `ids_from`, which by default is
compatible with both proteinGroups and modified peptide tables. Setting the `fdr` parameter (default=0.05) sets
the cut-off to use for filtering the results. If `summary` is ``True`` (default) the returned ``DataFrame``
contains just the ontology summary and FDR.
:param df: Pandas ``DataFrame`` to
:param enrichment: ``str`` GO enrichment method to use (one of 'function', 'process', 'cellular_location', 'protein_class', 'pathway')
:param organism: ``str`` organism name (e.g. "Homo sapiens")
:param summary: ``bool`` return full, or summarised dataset
:param fdr: ``float`` FDR cut-off to use for returned GO enrichments
:param ids_from: ``list`` of ``str`` containing the index levels to select IDs from (genes, protein IDs, etc.) default=['Proteins','Protein IDs']
:return: Pandas ``DataFrame`` containing enrichments, sorted by P value.
"""
if isinstance(df, pd.DataFrame) or isinstance(df, pd.Series):
l = list(set(ids_from) & set(df.index.names))[0]
data = "\n".join([get_protein_id(s) for s in df.index.get_level_values(l)])
else:
data = "\n".join([get_protein_id(s) for s in df])
r = requests.post("http://www.pantherdb.org/webservices/garuda/tools/enrichment/VER_2/enrichment.jsp", data={
'organism': organism,
'type': 'enrichment',
'enrichmentType': enrichment},
files = {
'geneList': ('genelist.txt', StringIO(data) ),
}
)
try:
go = pd.read_csv(StringIO(r.text), sep='\t', skiprows=5, lineterminator='\n', header=None)
except ValueError:
return None
go.columns = ["GO", "Name", "Gene ID", "P", "FDR"]
go = go.set_index(["GO", "Name"])
if summary:
go = go.drop("Gene ID", axis=1).mean(axis=0, level=["GO","Name"])
if fdr:
go = go[ go["P"] < fdr ]
return go.sort_values(by="P", ascending=True)
def anova_1way(df, *args, **kwargs):
"""
Perform Analysis of Variation (ANOVA) on provided dataframe
and for specified groups. Groups for analysis can be specified
as individual arguments, e.g.
anova(df, "Group A", "Group B")
anova(df, ("Group A", 5), ("Group B", 5))
At least 2 groups must be provided.
:return: Dataframe containing selected groups and P/T/sig value for the comparisons.
"""
if len(args) < 2:
raise Exception("Not enough arguments. Provide at least two group/indexes for comparison.")
# Select columns for test
df = df[list(args)]
fdr = kwargs.get('fdr', 0.05)
pv = []
tv = []
for n in range(df.shape[0]):
data = []
for idx in args:
dv = df.iloc[n][idx].values
dv = np.ma.masked_where(np.isnan(dv), dv)
data.append(dv)
# Calculate the p value between two groups (t-test)
t, p = sp.stats.mstats.f_oneway(*data)
pv.append(p)
tv.append(t)
df['ANOVA p'] = pv
df['ANOVA t'] = tv
df['ANOVA sig'] = np.array(pv) <= fdr
return df
def ttest_1samp(df, is_log2=False, fillna=None):
"""
    Perform a one-sample t-test on a dataframe ``df``.
    The mean of each group is calculated along the y-axis (per protein) and used to generate a log2 ratio.
    If a log2-transformed dataset is supplied set `is_log2=True` (a warning will be given when negative
    values are present).
:param df: Pandas `dataframe`
:param is_log2: `bool` is the data log2 transformed already?
:param fillna: `float` fill NaN values with value (default: 0)
:return:
"""
df = df.copy()
    if np.any(df.values[~pd.isnull(df.values)] < 0) and not is_log2:
        warnings.warn("Input data contains negative values; if the data is log2 transformed, set is_log2=True.")
import pandas as pd
import numpy as np
from datetime import date
"""
dataset split:
(date_received)
dataset3: 20160701~20160731 (113640),features3 from 20160315~20160630  (off_test)
dataset2: 20160515~20160615 (258446),features2 from 20160201~20160514
dataset1: 20160414~20160514 (138303),features1 from 20160101~20160413
1.merchant related:
sales_use_coupon. total_coupon
transfer_rate = sales_use_coupon/total_coupon.
merchant_avg_distance,merchant_min_distance,merchant_max_distance of those use coupon
total_sales. coupon_rate = sales_use_coupon/total_sales.
2.coupon related:
discount_rate. discount_man. discount_jian. is_man_jian
day_of_week,day_of_month. (date_received)
3.user related:
distance.
user_avg_distance, user_min_distance,user_max_distance.
buy_use_coupon. buy_total. coupon_received.
buy_use_coupon/coupon_received.
avg_diff_date_datereceived. min_diff_date_datereceived. max_diff_date_datereceived.
count_merchant.
4.user_merchant:
times_user_buy_merchant_before.
5. other feature:
this_month_user_receive_all_coupon_count
this_month_user_receive_same_coupon_count
this_month_user_receive_same_coupon_lastone
this_month_user_receive_same_coupon_firstone
this_day_user_receive_all_coupon_count
this_day_user_receive_same_coupon_count
day_gap_before, day_gap_after (receive the same coupon)
"""
#1754884 record,1053282 with coupon_id,9738 coupon. date_received:20160101~20160615,date:20160101~20160630, 539438 users, 8415 merchants
off_train = pd.read_csv('data/ccf_offline_stage1_train.csv',header=None)
off_train.columns = ['user_id','merchant_id','coupon_id','discount_rate','distance','date_received','date']
#2050 coupon_id. date_received:20160701~20160731, 76309 users(76307 in trainset, 35965 in online_trainset), 1559 merchants(1558 in trainset)
off_test = pd.read_csv('data/ccf_offline_stage1_test_revised.csv',header=None)
off_test.columns = ['user_id','merchant_id','coupon_id','discount_rate','distance','date_received']
#11429826 record(872357 with coupon_id),762858 user(267448 in off_train)
on_train = pd.read_csv('data/ccf_online_stage1_train.csv',header=None)
on_train.columns = ['user_id','merchant_id','action','coupon_id','discount_rate','date_received','date']
dataset3 = off_test
feature3 = off_train[((off_train.date>='20160315')&(off_train.date<='20160630'))|((off_train.date=='null')&(off_train.date_received>='20160315')&(off_train.date_received<='20160630'))]
dataset2 = off_train[(off_train.date_received>='20160515')&(off_train.date_received<='20160615')]
feature2 = off_train[(off_train.date>='20160201')&(off_train.date<='20160514')|((off_train.date=='null')&(off_train.date_received>='20160201')&(off_train.date_received<='20160514'))]
dataset1 = off_train[(off_train.date_received>='20160414')&(off_train.date_received<='20160514')]
feature1 = off_train[(off_train.date>='20160101')&(off_train.date<='20160413')|((off_train.date=='null')&(off_train.date_received>='20160101')&(off_train.date_received<='20160413'))]
############# other feature ##################
"""
5. other feature:
this_month_user_receive_all_coupon_count
this_month_user_receive_same_coupon_count
this_month_user_receive_same_coupon_lastone
this_month_user_receive_same_coupon_firstone
this_day_user_receive_all_coupon_count
this_day_user_receive_same_coupon_count
day_gap_before, day_gap_after (receive the same coupon)
"""
#for dataset3
t = dataset3[['user_id']]
t['this_month_user_receive_all_coupon_count'] = 1
t = t.groupby('user_id').agg('sum').reset_index()
t1 = dataset3[['user_id','coupon_id']]
t1['this_month_user_receive_same_coupon_count'] = 1
t1 = t1.groupby(['user_id','coupon_id']).agg('sum').reset_index()
t2 = dataset3[['user_id','coupon_id','date_received']]
t2.date_received = t2.date_received.astype('str')
t2 = t2.groupby(['user_id','coupon_id'])['date_received'].agg(lambda x:':'.join(x)).reset_index()
t2['receive_number'] = t2.date_received.apply(lambda s:len(s.split(':')))
t2 = t2[t2.receive_number>1]
t2['max_date_received'] = t2.date_received.apply(lambda s:max([int(d) for d in s.split(':')]))
t2['min_date_received'] = t2.date_received.apply(lambda s:min([int(d) for d in s.split(':')]))
t2 = t2[['user_id','coupon_id','max_date_received','min_date_received']]
t3 = dataset3[['user_id','coupon_id','date_received']]
t3 = pd.merge(t3,t2,on=['user_id','coupon_id'],how='left')
t3['this_month_user_receive_same_coupon_lastone'] = t3.max_date_received - t3.date_received
t3['this_month_user_receive_same_coupon_firstone'] = t3.date_received - t3.min_date_received
def is_firstlastone(x):
if x==0:
return 1
elif x>0:
return 0
else:
return -1 #those only receive once
t3.this_month_user_receive_same_coupon_lastone = t3.this_month_user_receive_same_coupon_lastone.apply(is_firstlastone)
t3.this_month_user_receive_same_coupon_firstone = t3.this_month_user_receive_same_coupon_firstone.apply(is_firstlastone)
t3 = t3[['user_id','coupon_id','date_received','this_month_user_receive_same_coupon_lastone','this_month_user_receive_same_coupon_firstone']]
t4 = dataset3[['user_id','date_received']]
t4['this_day_user_receive_all_coupon_count'] = 1
t4 = t4.groupby(['user_id','date_received']).agg('sum').reset_index()
t5 = dataset3[['user_id','coupon_id','date_received']]
t5['this_day_user_receive_same_coupon_count'] = 1
t5 = t5.groupby(['user_id','coupon_id','date_received']).agg('sum').reset_index()
t6 = dataset3[['user_id','coupon_id','date_received']]
t6.date_received = t6.date_received.astype('str')
t6 = t6.groupby(['user_id','coupon_id'])['date_received'].agg(lambda x:':'.join(x)).reset_index()
t6.rename(columns={'date_received':'dates'},inplace=True)
def get_day_gap_before(s):
date_received,dates = s.split('-')
dates = dates.split(':')
gaps = []
for d in dates:
this_gap = (date(int(date_received[0:4]),int(date_received[4:6]),int(date_received[6:8]))-date(int(d[0:4]),int(d[4:6]),int(d[6:8]))).days
if this_gap>0:
gaps.append(this_gap)
if len(gaps)==0:
return -1
else:
return min(gaps)
def get_day_gap_after(s):
date_received,dates = s.split('-')
dates = dates.split(':')
gaps = []
for d in dates:
this_gap = (date(int(d[0:4]),int(d[4:6]),int(d[6:8]))-date(int(date_received[0:4]),int(date_received[4:6]),int(date_received[6:8]))).days
if this_gap>0:
gaps.append(this_gap)
if len(gaps)==0:
return -1
else:
return min(gaps)
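
# Worked example (editor's note): for '20160510-20160501:20160520' the earlier
# receipt (20160501) is 9 days before, so get_day_gap_before returns 9, and the
# later receipt (20160520) is 10 days after, so get_day_gap_after returns 10.
# With no earlier/later receipt of the same coupon, -1 is returned.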
t7 = dataset3[['user_id','coupon_id','date_received']]
t7 = pd.merge(t7,t6,on=['user_id','coupon_id'],how='left')
t7['date_received_date'] = t7.date_received.astype('str') + '-' + t7.dates
t7['day_gap_before'] = t7.date_received_date.apply(get_day_gap_before)
t7['day_gap_after'] = t7.date_received_date.apply(get_day_gap_after)
t7 = t7[['user_id','coupon_id','date_received','day_gap_before','day_gap_after']]
other_feature3 = pd.merge(t1,t,on='user_id')
other_feature3 = pd.merge(other_feature3,t3,on=['user_id','coupon_id'])
other_feature3 = pd.merge(other_feature3,t4,on=['user_id','date_received'])
other_feature3 = pd.merge(other_feature3,t5,on=['user_id','coupon_id','date_received'])
other_feature3 = pd.merge(other_feature3,t7,on=['user_id','coupon_id','date_received'])
other_feature3.to_csv('data/other_feature3.csv',index=None)
print(other_feature3.shape)
#for dataset2
t = dataset2[['user_id']]
t['this_month_user_receive_all_coupon_count'] = 1
t = t.groupby('user_id').agg('sum').reset_index()
t1 = dataset2[['user_id','coupon_id']]
t1['this_month_user_receive_same_coupon_count'] = 1
t1 = t1.groupby(['user_id','coupon_id']).agg('sum').reset_index()
t2 = dataset2[['user_id','coupon_id','date_received']]
t2.date_received = t2.date_received.astype('str')
t2 = t2.groupby(['user_id','coupon_id'])['date_received'].agg(lambda x:':'.join(x)).reset_index()
t2['receive_number'] = t2.date_received.apply(lambda s:len(s.split(':')))
t2 = t2[t2.receive_number>1]
t2['max_date_received'] = t2.date_received.apply(lambda s:max([int(d) for d in s.split(':')]))
t2['min_date_received'] = t2.date_received.apply(lambda s:min([int(d) for d in s.split(':')]))
t2 = t2[['user_id','coupon_id','max_date_received','min_date_received']]
t3 = dataset2[['user_id','coupon_id','date_received']]
t3 = pd.merge(t3,t2,on=['user_id','coupon_id'],how='left')
t3['this_month_user_receive_same_coupon_lastone'] = t3.max_date_received - t3.date_received.astype('int')
t3['this_month_user_receive_same_coupon_firstone'] = t3.date_received.astype('int') - t3.min_date_received
def is_firstlastone(x):
if x==0:
return 1
elif x>0:
return 0
else:
return -1 #those only receive once
t3.this_month_user_receive_same_coupon_lastone = t3.this_month_user_receive_same_coupon_lastone.apply(is_firstlastone)
t3.this_month_user_receive_same_coupon_firstone = t3.this_month_user_receive_same_coupon_firstone.apply(is_firstlastone)
t3 = t3[['user_id','coupon_id','date_received','this_month_user_receive_same_coupon_lastone','this_month_user_receive_same_coupon_firstone']]
t4 = dataset2[['user_id','date_received']]
t4['this_day_user_receive_all_coupon_count'] = 1
t4 = t4.groupby(['user_id','date_received']).agg('sum').reset_index()
t5 = dataset2[['user_id','coupon_id','date_received']]
t5['this_day_user_receive_same_coupon_count'] = 1
t5 = t5.groupby(['user_id','coupon_id','date_received']).agg('sum').reset_index()
t6 = dataset2[['user_id','coupon_id','date_received']]
t6.date_received = t6.date_received.astype('str')
t6 = t6.groupby(['user_id','coupon_id'])['date_received'].agg(lambda x:':'.join(x)).reset_index()
t6.rename(columns={'date_received':'dates'},inplace=True)
def get_day_gap_before(s):
date_received,dates = s.split('-')
dates = dates.split(':')
gaps = []
for d in dates:
this_gap = (date(int(date_received[0:4]),int(date_received[4:6]),int(date_received[6:8]))-date(int(d[0:4]),int(d[4:6]),int(d[6:8]))).days
if this_gap>0:
gaps.append(this_gap)
if len(gaps)==0:
return -1
else:
return min(gaps)
def get_day_gap_after(s):
date_received,dates = s.split('-')
dates = dates.split(':')
gaps = []
for d in dates:
this_gap = (date(int(d[0:4]),int(d[4:6]),int(d[6:8]))-date(int(date_received[0:4]),int(date_received[4:6]),int(date_received[6:8]))).days
if this_gap>0:
gaps.append(this_gap)
if len(gaps)==0:
return -1
else:
return min(gaps)
t7 = dataset2[['user_id','coupon_id','date_received']]
t7 = pd.merge(t7,t6,on=['user_id','coupon_id'],how='left')
t7['date_received_date'] = t7.date_received.astype('str') + '-' + t7.dates
t7['day_gap_before'] = t7.date_received_date.apply(get_day_gap_before)
t7['day_gap_after'] = t7.date_received_date.apply(get_day_gap_after)
t7 = t7[['user_id','coupon_id','date_received','day_gap_before','day_gap_after']]
other_feature2 = pd.merge(t1,t,on='user_id')
other_feature2 = pd.merge(other_feature2,t3,on=['user_id','coupon_id'])
other_feature2 = pd.merge(other_feature2,t4,on=['user_id','date_received'])
other_feature2 = pd.merge(other_feature2,t5,on=['user_id','coupon_id','date_received'])
other_feature2 = pd.merge(other_feature2,t7,on=['user_id','coupon_id','date_received'])
other_feature2.to_csv('data/other_feature2.csv',index=None)
print(other_feature2.shape)
#for dataset1
t = dataset1[['user_id']]
t['this_month_user_receive_all_coupon_count'] = 1
t = t.groupby('user_id').agg('sum').reset_index()
t1 = dataset1[['user_id','coupon_id']]
t1['this_month_user_receive_same_coupon_count'] = 1
t1 = t1.groupby(['user_id','coupon_id']).agg('sum').reset_index()
t2 = dataset1[['user_id','coupon_id','date_received']]
t2.date_received = t2.date_received.astype('str')
t2 = t2.groupby(['user_id','coupon_id'])['date_received'].agg(lambda x:':'.join(x)).reset_index()
t2['receive_number'] = t2.date_received.apply(lambda s:len(s.split(':')))
t2 = t2[t2.receive_number>1]
t2['max_date_received'] = t2.date_received.apply(lambda s:max([int(d) for d in s.split(':')]))
t2['min_date_received'] = t2.date_received.apply(lambda s:min([int(d) for d in s.split(':')]))
t2 = t2[['user_id','coupon_id','max_date_received','min_date_received']]
t3 = dataset1[['user_id','coupon_id','date_received']]
t3 = pd.merge(t3,t2,on=['user_id','coupon_id'],how='left')
t3['this_month_user_receive_same_coupon_lastone'] = t3.max_date_received - t3.date_received.astype('int')
t3['this_month_user_receive_same_coupon_firstone'] = t3.date_received.astype('int') - t3.min_date_received
def is_firstlastone(x):
if x==0:
return 1
elif x>0:
return 0
else:
return -1 #those only receive once
t3.this_month_user_receive_same_coupon_lastone = t3.this_month_user_receive_same_coupon_lastone.apply(is_firstlastone)
t3.this_month_user_receive_same_coupon_firstone = t3.this_month_user_receive_same_coupon_firstone.apply(is_firstlastone)
t3 = t3[['user_id','coupon_id','date_received','this_month_user_receive_same_coupon_lastone','this_month_user_receive_same_coupon_firstone']]
t4 = dataset1[['user_id','date_received']]
t4['this_day_user_receive_all_coupon_count'] = 1
t4 = t4.groupby(['user_id','date_received']).agg('sum').reset_index()
t5 = dataset1[['user_id','coupon_id','date_received']]
t5['this_day_user_receive_same_coupon_count'] = 1
t5 = t5.groupby(['user_id','coupon_id','date_received']).agg('sum').reset_index()
t6 = dataset1[['user_id','coupon_id','date_received']]
t6.date_received = t6.date_received.astype('str')
t6 = t6.groupby(['user_id','coupon_id'])['date_received'].agg(lambda x:':'.join(x)).reset_index()
t6.rename(columns={'date_received':'dates'},inplace=True)
def get_day_gap_before(s):
date_received,dates = s.split('-')
dates = dates.split(':')
gaps = []
for d in dates:
this_gap = (date(int(date_received[0:4]),int(date_received[4:6]),int(date_received[6:8]))-date(int(d[0:4]),int(d[4:6]),int(d[6:8]))).days
if this_gap>0:
gaps.append(this_gap)
if len(gaps)==0:
return -1
else:
return min(gaps)
def get_day_gap_after(s):
date_received,dates = s.split('-')
dates = dates.split(':')
gaps = []
for d in dates:
this_gap = (date(int(d[0:4]),int(d[4:6]),int(d[6:8]))-date(int(date_received[0:4]),int(date_received[4:6]),int(date_received[6:8]))).days
if this_gap>0:
gaps.append(this_gap)
if len(gaps)==0:
return -1
else:
return min(gaps)
t7 = dataset1[['user_id','coupon_id','date_received']]
t7 = pd.merge(t7,t6,on=['user_id','coupon_id'],how='left')
t7['date_received_date'] = t7.date_received.astype('str') + '-' + t7.dates
t7['day_gap_before'] = t7.date_received_date.apply(get_day_gap_before)
t7['day_gap_after'] = t7.date_received_date.apply(get_day_gap_after)
t7 = t7[['user_id','coupon_id','date_received','day_gap_before','day_gap_after']]
other_feature1 = pd.merge(t1,t,on='user_id')
other_feature1 = pd.merge(other_feature1,t3,on=['user_id','coupon_id'])
other_feature1 = pd.merge(other_feature1,t4,on=['user_id','date_received'])
other_feature1 = pd.merge(other_feature1,t5,on=['user_id','coupon_id','date_received'])
other_feature1 = pd.merge(other_feature1,t7,on=['user_id','coupon_id','date_received'])
other_feature1.to_csv('data/other_feature1.csv',index=None)
print(other_feature1.shape)
############# coupon related feature #############
"""
2.coupon related:
discount_rate. discount_man. discount_jian. is_man_jian
day_of_week,day_of_month. (date_received)
"""
def calc_discount_rate(s):
s =str(s)
s = s.split(':')
if len(s)==1:
return float(s[0])
else:
return 1.0-float(s[1])/float(s[0])
def get_discount_man(s):
s =str(s)
s = s.split(':')
if len(s)==1:
return 'null'
else:
return int(s[0])
def get_discount_jian(s):
s =str(s)
s = s.split(':')
if len(s)==1:
return 'null'
else:
return int(s[1])
def is_man_jian(s):
s =str(s)
s = s.split(':')
if len(s)==1:
return 0
else:
return 1
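
# Worked example (editor's note): for a 'man:jian' coupon such as '150:20',
# get_discount_man -> 150, get_discount_jian -> 20, is_man_jian -> 1 and
# calc_discount_rate -> 1 - 20/150 = 0.8667; for a plain rate such as '0.95',
# the man/jian fields are 'null', is_man_jian -> 0 and the rate stays 0.95.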
#dataset3
dataset3['day_of_week'] = dataset3.date_received.astype('str').apply(lambda x:date(int(x[0:4]),int(x[4:6]),int(x[6:8])).weekday()+1)
dataset3['day_of_month'] = dataset3.date_received.astype('str').apply(lambda x:int(x[6:8]))
dataset3['days_distance'] = dataset3.date_received.astype('str').apply(lambda x:(date(int(x[0:4]),int(x[4:6]),int(x[6:8]))-date(2016,6,30)).days)
dataset3['discount_man'] = dataset3.discount_rate.apply(get_discount_man)
dataset3['discount_jian'] = dataset3.discount_rate.apply(get_discount_jian)
dataset3['is_man_jian'] = dataset3.discount_rate.apply(is_man_jian)
dataset3['discount_rate'] = dataset3.discount_rate.apply(calc_discount_rate)
d = dataset3[['coupon_id']]
d['coupon_count'] = 1
d = d.groupby('coupon_id').agg('sum').reset_index()
dataset3 = pd.merge(dataset3,d,on='coupon_id',how='left')
dataset3.to_csv('data/coupon3_feature.csv',index=None)
#dataset2
dataset2['day_of_week'] = dataset2.date_received.astype('str').apply(lambda x:date(int(x[0:4]),int(x[4:6]),int(x[6:8])).weekday()+1)
dataset2['day_of_month'] = dataset2.date_received.astype('str').apply(lambda x:int(x[6:8]))
dataset2['days_distance'] = dataset2.date_received.astype('str').apply(lambda x:(date(int(x[0:4]),int(x[4:6]),int(x[6:8]))-date(2016,5,14)).days)
dataset2['discount_man'] = dataset2.discount_rate.apply(get_discount_man)
dataset2['discount_jian'] = dataset2.discount_rate.apply(get_discount_jian)
dataset2['is_man_jian'] = dataset2.discount_rate.apply(is_man_jian)
dataset2['discount_rate'] = dataset2.discount_rate.apply(calc_discount_rate)
d = dataset2[['coupon_id']]
d['coupon_count'] = 1
d = d.groupby('coupon_id').agg('sum').reset_index()
dataset2 = pd.merge(dataset2,d,on='coupon_id',how='left')
# pylint: disable-msg=E1101,W0612
from datetime import datetime, time, timedelta, date
import sys
import os
import operator
from distutils.version import LooseVersion
import nose
import numpy as np
randn = np.random.randn
from pandas import (Index, Series, TimeSeries, DataFrame,
isnull, date_range, Timestamp, Period, DatetimeIndex,
Int64Index, to_datetime, bdate_range, Float64Index)
import pandas.core.datetools as datetools
import pandas.tseries.offsets as offsets
import pandas.tseries.tools as tools
import pandas.tseries.frequencies as fmod
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tslib import NaT, iNaT
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.index as _index
from pandas.compat import range, long, StringIO, lrange, lmap, zip, product
import pandas.core.datetools as dt
from numpy.random import rand
from numpy.testing import assert_array_equal
from pandas.util.testing import assert_frame_equal
import pandas.compat as compat
import pandas.core.common as com
from pandas import concat
from pandas import _np_version_under1p7
from numpy.testing.decorators import slow
def _skip_if_no_pytz():
try:
import pytz
except ImportError:
raise nose.SkipTest("pytz not installed")
def _skip_if_has_locale():
import locale
lang, _ = locale.getlocale()
if lang is not None:
raise nose.SkipTest("Specific locale is set {0}".format(lang))
class TestTimeSeriesDuplicates(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
dates = [datetime(2000, 1, 2), datetime(2000, 1, 2),
datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 3), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 4),
datetime(2000, 1, 4), datetime(2000, 1, 5)]
self.dups = Series(np.random.randn(len(dates)), index=dates)
def test_constructor(self):
tm.assert_isinstance(self.dups, TimeSeries)
tm.assert_isinstance(self.dups.index, DatetimeIndex)
def test_is_unique_monotonic(self):
self.assertFalse(self.dups.index.is_unique)
def test_index_unique(self):
uniques = self.dups.index.unique()
expected = DatetimeIndex([datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 5)])
self.assertEqual(uniques.dtype, 'M8[ns]') # sanity
self.assertTrue(uniques.equals(expected))
self.assertEqual(self.dups.index.nunique(), 4)
# #2563
self.assertTrue(isinstance(uniques, DatetimeIndex))
dups_local = self.dups.index.tz_localize('US/Eastern')
dups_local.name = 'foo'
result = dups_local.unique()
expected = DatetimeIndex(expected, tz='US/Eastern')
self.assertTrue(result.tz is not None)
self.assertEqual(result.name, 'foo')
self.assertTrue(result.equals(expected))
# NaT
arr = [ 1370745748 + t for t in range(20) ] + [iNaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
arr = [ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
def test_index_dupes_contains(self):
d = datetime(2011, 12, 5, 20, 30)
ix = DatetimeIndex([d, d])
self.assertTrue(d in ix)
def test_duplicate_dates_indexing(self):
ts = self.dups
uniques = ts.index.unique()
for date in uniques:
result = ts[date]
mask = ts.index == date
total = (ts.index == date).sum()
expected = ts[mask]
if total > 1:
assert_series_equal(result, expected)
else:
assert_almost_equal(result, expected[0])
cp = ts.copy()
cp[date] = 0
expected = Series(np.where(mask, 0, ts), index=ts.index)
assert_series_equal(cp, expected)
self.assertRaises(KeyError, ts.__getitem__, datetime(2000, 1, 6))
# new index
ts[datetime(2000,1,6)] = 0
self.assertEqual(ts[datetime(2000,1,6)], 0)
def test_range_slice(self):
idx = DatetimeIndex(['1/1/2000', '1/2/2000', '1/2/2000', '1/3/2000',
'1/4/2000'])
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts['1/2/2000':]
expected = ts[1:]
assert_series_equal(result, expected)
result = ts['1/2/2000':'1/3/2000']
expected = ts[1:4]
assert_series_equal(result, expected)
def test_groupby_average_dup_values(self):
result = self.dups.groupby(level=0).mean()
expected = self.dups.groupby(self.dups.index).mean()
assert_series_equal(result, expected)
def test_indexing_over_size_cutoff(self):
import datetime
# #1821
old_cutoff = _index._SIZE_CUTOFF
try:
_index._SIZE_CUTOFF = 1000
# create large list of non periodic datetime
dates = []
sec = datetime.timedelta(seconds=1)
half_sec = datetime.timedelta(microseconds=500000)
d = datetime.datetime(2011, 12, 5, 20, 30)
n = 1100
for i in range(n):
dates.append(d)
dates.append(d + sec)
dates.append(d + sec + half_sec)
dates.append(d + sec + sec + half_sec)
d += 3 * sec
# duplicate some values in the list
duplicate_positions = np.random.randint(0, len(dates) - 1, 20)
for p in duplicate_positions:
dates[p + 1] = dates[p]
df = DataFrame(np.random.randn(len(dates), 4),
index=dates,
columns=list('ABCD'))
pos = n * 3
timestamp = df.index[pos]
self.assertIn(timestamp, df.index)
# it works!
df.ix[timestamp]
self.assertTrue(len(df.ix[[timestamp]]) > 0)
finally:
_index._SIZE_CUTOFF = old_cutoff
def test_indexing_unordered(self):
# GH 2437
rng = date_range(start='2011-01-01', end='2011-01-15')
ts = Series(randn(len(rng)), index=rng)
ts2 = concat([ts[0:4],ts[-4:],ts[4:-4]])
for t in ts.index:
s = str(t)
expected = ts[t]
result = ts2[t]
self.assertTrue(expected == result)
# GH 3448 (ranges)
def compare(slobj):
result = ts2[slobj].copy()
result = result.sort_index()
expected = ts[slobj]
assert_series_equal(result,expected)
compare(slice('2011-01-01','2011-01-15'))
compare(slice('2010-12-30','2011-01-15'))
compare(slice('2011-01-01','2011-01-16'))
# partial ranges
compare(slice('2011-01-01','2011-01-6'))
compare(slice('2011-01-06','2011-01-8'))
compare(slice('2011-01-06','2011-01-12'))
# single values
result = ts2['2011'].sort_index()
expected = ts['2011']
assert_series_equal(result,expected)
# diff freq
rng = date_range(datetime(2005, 1, 1), periods=20, freq='M')
ts = Series(np.arange(len(rng)), index=rng)
ts = ts.take(np.random.permutation(20))
result = ts['2005']
for t in result.index:
self.assertTrue(t.year == 2005)
def test_indexing(self):
idx = date_range("2001-1-1", periods=20, freq='M')
ts = Series(np.random.rand(len(idx)),index=idx)
# getting
# GH 3070, make sure semantics work on Series/Frame
expected = ts['2001']
df = DataFrame(dict(A = ts))
result = df['2001']['A']
assert_series_equal(expected,result)
# setting
ts['2001'] = 1
expected = ts['2001']
df.loc['2001','A'] = 1
result = df['2001']['A']
assert_series_equal(expected,result)
# GH3546 (not including times on the last day)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:00', freq='H')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:59', freq='S')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = [ Timestamp('2013-05-31 00:00'), Timestamp(datetime(2013,5,31,23,59,59,999999))]
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013']
assert_series_equal(expected,ts)
# GH 3925, indexing with a seconds resolution string / datetime object
df = DataFrame(randn(5,5),columns=['open','high','low','close','volume'],index=date_range('2012-01-02 18:01:00',periods=5,tz='US/Central',freq='s'))
expected = df.loc[[df.index[2]]]
result = df['2012-01-02 18:01:02']
assert_frame_equal(result,expected)
# this is a single date, so will raise
self.assertRaises(KeyError, df.__getitem__, df.index[2],)
def test_recreate_from_data(self):
if _np_version_under1p7:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N', 'C']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, periods=1)
idx = DatetimeIndex(org, freq=f)
self.assertTrue(idx.equals(org))
        # unable to create tz-aware 'A' and 'C' freq
if _np_version_under1p7:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, tz='US/Pacific', periods=1)
idx = DatetimeIndex(org, freq=f, tz='US/Pacific')
self.assertTrue(idx.equals(org))
def assert_range_equal(left, right):
assert(left.equals(right))
assert(left.freq == right.freq)
assert(left.tz == right.tz)
class TestTimeSeries(tm.TestCase):
_multiprocess_can_split_ = True
def test_is_(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
self.assertTrue(dti.is_(dti))
self.assertTrue(dti.is_(dti.view()))
self.assertFalse(dti.is_(dti.copy()))
def test_dti_slicing(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
dti2 = dti[[1, 3, 5]]
v1 = dti2[0]
v2 = dti2[1]
v3 = dti2[2]
self.assertEqual(v1, Timestamp('2/28/2005'))
self.assertEqual(v2, Timestamp('4/30/2005'))
self.assertEqual(v3, Timestamp('6/30/2005'))
# don't carry freq through irregular slicing
self.assertIsNone(dti2.freq)
def test_pass_datetimeindex_to_index(self):
# Bugs in #1396
rng = date_range('1/1/2000', '3/1/2000')
idx = Index(rng, dtype=object)
expected = Index(rng.to_pydatetime(), dtype=object)
self.assert_numpy_array_equal(idx.values, expected.values)
def test_contiguous_boolean_preserve_freq(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
mask = np.zeros(len(rng), dtype=bool)
mask[10:20] = True
masked = rng[mask]
expected = rng[10:20]
self.assertIsNotNone(expected.freq)
assert_range_equal(masked, expected)
mask[22] = True
masked = rng[mask]
self.assertIsNone(masked.freq)
def test_getitem_median_slice_bug(self):
index = date_range('20090415', '20090519', freq='2B')
s = Series(np.random.randn(13), index=index)
indexer = [slice(6, 7, None)]
result = s[indexer]
expected = s[indexer[0]]
assert_series_equal(result, expected)
def test_series_box_timestamp(self):
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng)
tm.assert_isinstance(s[5], Timestamp)
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng, index=rng)
tm.assert_isinstance(s[5], Timestamp)
tm.assert_isinstance(s.iget_value(5), Timestamp)
def test_date_range_ambiguous_arguments(self):
# #2538
start = datetime(2011, 1, 1, 5, 3, 40)
end = datetime(2011, 1, 1, 8, 9, 40)
self.assertRaises(ValueError, date_range, start, end,
freq='s', periods=10)
def test_timestamp_to_datetime(self):
_skip_if_no_pytz()
rng = date_range('20090415', '20090519',
tz='US/Eastern')
stamp = rng[0]
dtval = stamp.to_pydatetime()
self.assertEqual(stamp, dtval)
self.assertEqual(stamp.tzinfo, dtval.tzinfo)
def test_index_convert_to_datetime_array(self):
_skip_if_no_pytz()
def _check_rng(rng):
converted = rng.to_pydatetime()
tm.assert_isinstance(converted, np.ndarray)
for x, stamp in zip(converted, rng):
tm.assert_isinstance(x, datetime)
self.assertEqual(x, stamp.to_pydatetime())
self.assertEqual(x.tzinfo, stamp.tzinfo)
rng = date_range('20090415', '20090519')
rng_eastern = date_range('20090415', '20090519', tz='US/Eastern')
rng_utc = date_range('20090415', '20090519', tz='utc')
_check_rng(rng)
_check_rng(rng_eastern)
_check_rng(rng_utc)
def test_ctor_str_intraday(self):
rng = DatetimeIndex(['1-1-2000 00:00:01'])
self.assertEqual(rng[0].second, 1)
def test_series_ctor_plus_datetimeindex(self):
rng = date_range('20090415', '20090519', freq='B')
data = dict((k, 1) for k in rng)
result = Series(data, index=rng)
self.assertIs(result.index, rng)
def test_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index)
result = result.fillna(method='bfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index, method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index, method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_setitem_timestamp(self):
# 2155
columns = DatetimeIndex(start='1/1/2012', end='2/1/2012',
freq=datetools.bday)
index = lrange(10)
data = DataFrame(columns=columns, index=index)
t = datetime(2012, 11, 1)
ts = Timestamp(t)
data[ts] = np.nan # works
def test_sparse_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
ss = s[:2].reindex(index).to_sparse()
result = ss.fillna(method='pad', limit=5)
expected = ss.fillna(method='pad', limit=5)
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
ss = s[-2:].reindex(index).to_sparse()
result = ss.fillna(method='backfill', limit=5)
expected = ss.fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
s = s.to_sparse()
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index, method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index, method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_sparse_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_pad_require_monotonicity(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
rng2 = rng[::2][::-1]
self.assertRaises(ValueError, rng2.get_indexer, rng,
method='pad')
def test_frame_ctor_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
df = DataFrame({'A': np.random.randn(len(rng)), 'B': dates})
self.assertTrue(np.issubdtype(df['B'].dtype, np.dtype('M8[ns]')))
def test_frame_add_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
df = DataFrame(index=np.arange(len(rng)))
df['A'] = rng
self.assertTrue(np.issubdtype(df['A'].dtype, np.dtype('M8[ns]')))
def test_frame_datetime64_pre1900_repr(self):
df = DataFrame({'year': date_range('1/1/1700', periods=50,
freq='A-DEC')})
# it works!
repr(df)
def test_frame_add_datetime64_col_other_units(self):
n = 100
units = ['h', 'm', 's', 'ms', 'D', 'M', 'Y']
ns_dtype = np.dtype('M8[ns]')
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df[unit] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertEqual(df[unit].dtype, ns_dtype)
self.assertTrue((df[unit].values == ex_vals).all())
# Test insertion into existing datetime64 column
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df['dates'] = np.arange(n, dtype=np.int64).view(ns_dtype)
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
tmp = df.copy()
tmp['dates'] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertTrue((tmp['dates'].values == ex_vals).all())
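# to_datetime with unit='s' treats ints/floats as seconds since the epoch;
# iNaT and NaN inputs should come back as NaT.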
def test_to_datetime_unit(self):
epoch = 1370745748
s = Series([ epoch + t for t in range(20) ])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = concat([Series([ epoch + t for t in range(20) ]).astype(float),Series([np.nan])],ignore_index=True)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
def test_series_ctor_datetime64(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
series = Series(dates)
self.assertTrue(np.issubdtype(series.dtype, np.dtype('M8[ns]')))
def test_index_cast_datetime64_other_units(self):
arr = np.arange(0, 100, 10, dtype=np.int64).view('M8[D]')
idx = Index(arr)
self.assertTrue((idx.values == tslib.cast_to_nanoseconds(arr)).all())
def test_index_astype_datetime64(self):
idx = Index([datetime(2012, 1, 1)], dtype=object)
if not _np_version_under1p7:
raise nose.SkipTest("test only valid in numpy < 1.7")
casted = idx.astype(np.dtype('M8[D]'))
expected = DatetimeIndex(idx.values)
tm.assert_isinstance(casted, DatetimeIndex)
self.assertTrue(casted.equals(expected))
def test_reindex_series_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
series = Series(rng)
result = series.reindex(lrange(15))
self.assertTrue(np.issubdtype(result.dtype, np.dtype('M8[ns]')))
mask = result.isnull()
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_reindex_frame_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
df = DataFrame({'A': np.random.randn(len(rng)), 'B': rng})
result = df.reindex(lrange(15))
self.assertTrue(np.issubdtype(result['B'].dtype, np.dtype('M8[ns]')))
mask = com.isnull(result)['B']
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_series_repr_nat(self):
series = Series([0, 1000, 2000, iNaT], dtype='M8[ns]')
result = repr(series)
expected = ('0 1970-01-01 00:00:00\n'
'1 1970-01-01 00:00:00.000001\n'
'2 1970-01-01 00:00:00.000002\n'
'3 NaT\n'
'dtype: datetime64[ns]')
self.assertEqual(result, expected)
def test_fillna_nat(self):
series = Series([0, 1, 2, iNaT], dtype='M8[ns]')
filled = series.fillna(method='pad')
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='pad')
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
series = Series([iNaT, 0, 1, 2], dtype='M8[ns]')
filled = series.fillna(method='bfill')
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='bfill')
filled2 = df.fillna(value=series[1])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
def test_string_na_nat_conversion(self):
# GH #999, #858
from pandas.compat import parse_date
strings = np.array(['1/1/2000', '1/2/2000', np.nan,
'1/4/2000, 12:34:56'], dtype=object)
expected = np.empty(4, dtype='M8[ns]')
for i, val in enumerate(strings):
if com.isnull(val):
expected[i] = iNaT
else:
expected[i] = parse_date(val)
result = tslib.array_to_datetime(strings)
assert_almost_equal(result, expected)
result2 = to_datetime(strings)
tm.assert_isinstance(result2, DatetimeIndex)
assert_almost_equal(result, result2)
malformed = np.array(['1/100/2000', np.nan], dtype=object)
result = to_datetime(malformed)
assert_almost_equal(result, malformed)
self.assertRaises(ValueError, to_datetime, malformed,
errors='raise')
idx = ['a', 'b', 'c', 'd', 'e']
series = Series(['1/1/2000', np.nan, '1/3/2000', np.nan,
'1/5/2000'], index=idx, name='foo')
dseries = Series([to_datetime('1/1/2000'), np.nan,
to_datetime('1/3/2000'), np.nan,
to_datetime('1/5/2000')], index=idx, name='foo')
result = to_datetime(series)
dresult = to_datetime(dseries)
expected = Series(np.empty(5, dtype='M8[ns]'), index=idx)
for i in range(5):
x = series[i]
if isnull(x):
expected[i] = iNaT
else:
expected[i] = to_datetime(x)
assert_series_equal(result, expected)
self.assertEqual(result.name, 'foo')
assert_series_equal(dresult, expected)
self.assertEqual(dresult.name, 'foo')
def test_to_datetime_iso8601(self):
result = to_datetime(["2012-01-01 00:00:00"])
exp = Timestamp("2012-01-01 00:00:00")
self.assertEqual(result[0], exp)
result = to_datetime(['20121001']) # bad iso 8601
exp = Timestamp('2012-10-01')
self.assertEqual(result[0], exp)
def test_to_datetime_default(self):
rs = to_datetime('2001')
xp = datetime(2001, 1, 1)
self.assertEqual(rs, xp)
#### dayfirst is essentially broken
#### to_datetime('01-13-2012', dayfirst=True)
#### self.assertRaises(ValueError, to_datetime('01-13-2012', dayfirst=True))
def test_to_datetime_on_datetime64_series(self):
# #2699
s = Series(date_range('1/1/2000', periods=10))
result = to_datetime(s)
self.assertEqual(result[0], s[0])
def test_to_datetime_with_apply(self):
# only tested with US/None locales; other locales are skipped
_skip_if_has_locale()
# GH 5195
# with a format and coerce a single item to_datetime fails
td = Series(['May 04', 'Jun 02', 'Dec 11'], index=[1,2,3])
expected = pd.to_datetime(td, format='%b %y')
result = td.apply(pd.to_datetime, format='%b %y')
assert_series_equal(result, expected)
td = pd.Series(['May 04', 'Jun 02', ''], index=[1,2,3])
self.assertRaises(ValueError, lambda : pd.to_datetime(td,format='%b %y'))
self.assertRaises(ValueError, lambda : td.apply(pd.to_datetime, format='%b %y'))
expected = pd.to_datetime(td, format='%b %y', coerce=True)
result = td.apply(lambda x: pd.to_datetime(x, format='%b %y', coerce=True))
assert_series_equal(result, expected)
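# In this pandas version, datetime field access on NaT yields -1 for both
# the vector (DatetimeIndex) and scalar cases tested below.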
def test_nat_vector_field_access(self):
idx = DatetimeIndex(['1/1/2000', None, None, '1/4/2000'])
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(idx, field)
expected = [getattr(x, field) if x is not NaT else -1
for x in idx]
self.assert_numpy_array_equal(result, expected)
def test_nat_scalar_field_access(self):
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(NaT, field)
self.assertEqual(result, -1)
self.assertEqual(NaT.weekday(), -1)
def test_to_datetime_types(self):
# empty string
result = to_datetime('')
self.assertIs(result, NaT)
result = to_datetime(['', ''])
self.assertTrue(isnull(result).all())
# ints
result = Timestamp(0)
expected = to_datetime(0)
self.assertEqual(result, expected)
# GH 3888 (strings)
expected = to_datetime(['2012'])[0]
result = to_datetime('2012')
self.assertEqual(result, expected)
### array = ['2012','20120101','20120101 12:01:01']
array = ['20120101','20120101 12:01:01']
expected = list(to_datetime(array))
result = lmap(Timestamp,array)
tm.assert_almost_equal(result,expected)
### currently fails ###
### result = Timestamp('2012')
### expected = to_datetime('2012')
### self.assertEqual(result, expected)
def test_to_datetime_unprocessable_input(self):
# GH 4928
self.assert_numpy_array_equal(
to_datetime([1, '1']),
np.array([1, '1'], dtype='O')
)
self.assertRaises(TypeError, to_datetime, [1, '1'], errors='raise')
def test_to_datetime_other_datetime64_units(self):
# 5/25/2012
scalar = np.int64(1337904000000000).view('M8[us]')
as_obj = scalar.astype('O')
index = DatetimeIndex([scalar])
self.assertEqual(index[0], scalar.astype('O'))
value = Timestamp(scalar)
self.assertEqual(value, as_obj)
def test_to_datetime_list_of_integers(self):
rng = date_range('1/1/2000', periods=20)
rng = DatetimeIndex(rng.values)
ints = list(rng.asi8)
result = DatetimeIndex(ints)
self.assertTrue(rng.equals(result))
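# Out-of-bounds datetime64 values raise under errors='raise' and are
# coerced to NaT under coerce=True.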
def test_to_datetime_dt64s(self):
in_bound_dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
for dt in in_bound_dts:
self.assertEqual(
pd.to_datetime(dt),
Timestamp(dt)
)
oob_dts = [
np.datetime64('1000-01-01'),
np.datetime64('5000-01-02'),
]
for dt in oob_dts:
self.assertRaises(ValueError, pd.to_datetime, dt, errors='raise')
self.assertRaises(ValueError, tslib.Timestamp, dt)
self.assertIs(pd.to_datetime(dt, coerce=True), NaT)
def test_to_datetime_array_of_dt64s(self):
dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
# Assuming all datetimes are in bounds, to_datetime() returns
# an array that is equal to Timestamp() parsing
self.assert_numpy_array_equal(
pd.to_datetime(dts, box=False),
np.array([Timestamp(x).asm8 for x in dts])
)
# A list of datetimes where the last one is out of bounds
dts_with_oob = dts + [np.datetime64('9999-01-01')]
self.assertRaises(
ValueError,
pd.to_datetime,
dts_with_oob,
coerce=False,
errors='raise'
)
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=True),
np.array(
[
Timestamp(dts_with_oob[0]).asm8,
Timestamp(dts_with_oob[1]).asm8,
iNaT,
],
dtype='M8'
)
)
# With coerce=False and errors='ignore', out of bounds datetime64s
# are converted to their .item(), which depending on the version of
# numpy is either a python datetime.datetime or datetime.date
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=False),
np.array(
[dt.item() for dt in dts_with_oob],
dtype='O'
)
)
def test_index_to_datetime(self):
idx = Index(['1/1/2000', '1/2/2000', '1/3/2000'])
result = idx.to_datetime()
expected = DatetimeIndex(datetools.to_datetime(idx.values))
self.assertTrue(result.equals(expected))
today = datetime.today()
idx = Index([today], dtype=object)
result = idx.to_datetime()
expected = DatetimeIndex([today])
self.assertTrue(result.equals(expected))
def test_to_datetime_freq(self):
xp = bdate_range('2000-1-1', periods=10, tz='UTC')
rs = xp.to_datetime()
self.assertEqual(xp.freq, rs.freq)
self.assertEqual(xp.tzinfo, rs.tzinfo)
def test_range_misspecified(self):
# GH #1095
self.assertRaises(ValueError, date_range, '1/1/2000')
self.assertRaises(ValueError, date_range, end='1/1/2000')
self.assertRaises(ValueError, date_range, periods=10)
self.assertRaises(ValueError, date_range, '1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, end='1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, periods=10, freq='H')
def test_reasonable_keyerror(self):
# GH #1062
index = DatetimeIndex(['1/3/2000'])
try:
index.get_loc('1/1/2000')
except KeyError as e:
self.assertIn('2000', str(e))
def test_reindex_with_datetimes(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
result = ts.reindex(list(ts.index[5:10]))
expected = ts[5:10]
tm.assert_series_equal(result, expected)
result = ts[list(ts.index[5:10])]
tm.assert_series_equal(result, expected)
def test_promote_datetime_date(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
ts_slice = ts[5:]
ts2 = ts_slice.copy()
ts2.index = [x.date() for x in ts2.index]
result = ts + ts2
result2 = ts2 + ts
expected = ts + ts[5:]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# test asfreq
result = ts2.asfreq('4H', method='ffill')
expected = ts[5:].asfreq('4H', method='ffill')
assert_series_equal(result, expected)
result = rng.get_indexer(ts2.index)
expected = rng.get_indexer(ts_slice.index)
self.assert_numpy_array_equal(result, expected)
def test_asfreq_normalize(self):
rng = date_range('1/1/2000 09:30', periods=20)
norm = date_range('1/1/2000', periods=20)
vals = np.random.randn(20)
ts = Series(vals, index=rng)
result = ts.asfreq('D', normalize=True)
norm = date_range('1/1/2000', periods=20)
expected = Series(vals, index=norm)
assert_series_equal(result, expected)
vals = np.random.randn(20, 3)
ts = DataFrame(vals, index=rng)
result = ts.asfreq('D', normalize=True)
expected = DataFrame(vals, index=norm)
assert_frame_equal(result, expected)
def test_date_range_gen_error(self):
rng = date_range('1/1/2000 00:00', '1/1/2000 00:18', freq='5min')
self.assertEqual(len(rng), 4)
def test_first_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.first('10d')
self.assertEqual(len(result), 20)
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.first('10d')
self.assertEqual(len(result), 10)
result = ts.first('3M')
expected = ts[:'3/31/2000']
assert_series_equal(result, expected)
result = ts.first('21D')
expected = ts[:21]
assert_series_equal(result, expected)
result = ts[:0].first('3M')
assert_series_equal(result, ts[:0])
def test_last_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.last('10d')
self.assertEqual(len(result), 20)
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.last('10d')
self.assertEqual(len(result), 10)
result = ts.last('21D')
expected = ts['12/12/2009':]
assert_series_equal(result, expected)
result = ts.last('21D')
expected = ts[-21:]
assert_series_equal(result, expected)
result = ts[:0].last('3M')
assert_series_equal(result, ts[:0])
def test_add_offset(self):
rng = date_range('1/1/2000', '2/1/2000')
result = rng + offsets.Hour(2)
expected = date_range('1/1/2000 02:00', '2/1/2000 02:00')
self.assertTrue(result.equals(expected))
def test_format_pre_1900_dates(self):
rng = date_range('1/1/1850', '1/1/1950', freq='A-DEC')
rng.format()
ts = Series(1, index=rng)
repr(ts)
def test_repeat(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
def test_at_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
rs = ts.at_time(rng[1])
self.assertTrue((rs.index.hour == rng[1].hour).all())
self.assertTrue((rs.index.minute == rng[1].minute).all())
self.assertTrue((rs.index.second == rng[1].second).all())
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts[time(9, 30)]
result_df = df.ix[time(9, 30)]
expected = ts[(rng.hour == 9) & (rng.minute == 30)]
exp_df = df[(rng.hour == 9) & (rng.minute == 30)]
# expected.index = date_range('1/1/2000', '1/4/2000')
assert_series_equal(result, expected)
tm.assert_frame_equal(result_df, exp_df)
chunk = df.ix['1/4/2000':]
result = chunk.ix[time(9, 30)]
expected = result_df[-1:]
tm.assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.at_time(time(0, 0))
assert_series_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = Series(np.random.randn(len(rng)), rng)
rs = ts.at_time('16:00')
self.assertEqual(len(rs), 0)
def test_at_time_frame(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
rs = ts.at_time(rng[1])
self.assertTrue((rs.index.hour == rng[1].hour).all())
self.assertTrue((rs.index.minute == rng[1].minute).all())
self.assertTrue((rs.index.second == rng[1].second).all())
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_frame_equal(result, expected)
result = ts.ix[time(9, 30)]
expected = ts.ix[(rng.hour == 9) & (rng.minute == 30)]
assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts.at_time(time(0, 0))
assert_frame_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = DataFrame(np.random.randn(len(rng), 2), rng)
rs = ts.at_time('16:00')
self.assertEqual(len(rs), 0)
def test_between_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
stime = time(0, 0)
etime = time(1, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
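# 5-minute spacing gives 13 stamps from 00:00 to 01:00 inclusive per day;
# 4 full days plus the single 00:00 stamp on the end date -> 13 * 4 + 1.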
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue(t >= stime)
else:
self.assertTrue(t > stime)
if inc_end:
self.assertTrue(t <= etime)
else:
self.assertTrue(t < etime)
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
assert_series_equal(result, expected)
# across midnight
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
stime = time(22, 0)
etime = time(9, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue((t >= stime) or (t <= etime))
else:
self.assertTrue((t > stime) or (t <= etime))
if inc_end:
self.assertTrue((t <= etime) or (t >= stime))
else:
self.assertTrue((t < etime) or (t >= stime))
def test_between_time_frame(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(0, 0)
etime = time(1, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue(t >= stime)
else:
self.assertTrue(t > stime)
if inc_end:
self.assertTrue(t <= etime)
else:
self.assertTrue(t < etime)
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
assert_frame_equal(result, expected)
# across midnight
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(22, 0)
etime = time(9, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue((t >= stime) or (t <= etime))
else:
self.assertTrue((t > stime) or (t <= etime))
if inc_end:
self.assertTrue((t <= etime) or (t >= stime))
else:
self.assertTrue((t < etime) or (t >= stime))
def test_dti_constructor_preserve_dti_freq(self):
rng = date_range('1/1/2000', '1/2/2000', freq='5min')
rng2 = DatetimeIndex(rng)
self.assertEqual(rng.freq, rng2.freq)
def test_normalize(self):
rng = date_range('1/1/2000 9:30', periods=10, freq='D')
result = rng.normalize()
expected = date_range('1/1/2000', periods=10, freq='D')
self.assertTrue(result.equals(expected))
rng_ns = pd.DatetimeIndex(np.array([1380585623454345752, 1380585612343234312]).astype("datetime64[ns]"))
rng_ns_normalized = rng_ns.normalize()
expected = pd.DatetimeIndex(np.array([1380585600000000000, 1380585600000000000]).astype("datetime64[ns]"))
self.assertTrue(rng_ns_normalized.equals(expected))
self.assertTrue(result.is_normalized)
self.assertFalse(rng.is_normalized)
def test_to_period(self):
from pandas.tseries.period import period_range
ts = _simple_ts('1/1/2000', '1/1/2001')
pts = ts.to_period()
exp = ts.copy()
exp.index = period_range('1/1/2000', '1/1/2001')
assert_series_equal(pts, exp)
pts = ts.to_period('M')
self.assertTrue(pts.index.equals(exp.index.asfreq('M')))
def create_dt64_based_index(self):
data = [Timestamp('2007-01-01 10:11:12.123456Z'),
Timestamp('2007-01-01 10:11:13.789123Z')]
index = DatetimeIndex(data)
return index
def test_to_period_millisecond(self):
index = self.create_dt64_based_index()
period = index.to_period(freq='L')
self.assertEqual(2, len(period))
self.assertEqual(period[0], Period('2007-01-01 10:11:12.123Z', 'L'))
self.assertEqual(period[1], Period('2007-01-01 10:11:13.789Z', 'L'))
def test_to_period_microsecond(self):
index = self.create_dt64_based_index()
period = index.to_period(freq='U')
self.assertEqual(2, len(period))
self.assertEqual(period[0], | Period('2007-01-01 10:11:12.123456Z', 'U') | pandas.Period |
import os
import numpy as np
import math
import pandas as pd
import sys
import joblib
import sklearn.metrics
from math import sqrt
from sklearn.neural_network import MLPRegressor
from sklearn.metrics import mean_squared_error
from sklearn import preprocessing
base_path = os.path.dirname(os.path.abspath(__file__))
#'''/home/arakka/visibility-china/preprocessing/src/models'''
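# Presumably scores a station's test data with a pre-trained model; only the
# argument handling and data loading survive in this snippet.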
def main():
if len(sys.argv) < 2:
print ('Number of arguments:',len(sys.argv))
print (str(sys.argv))
print ('Usage: ./predict_model.py input_test_lcn')
sys.exit(0)
st=str(sys.argv[1])
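# The first four characters of the argument presumably encode the station id
# used to locate the per-station CSV below.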
stn=st[0:4]
# load data
data = | pd.read_csv(base_path+"/../../data/test_data/"+stn+".csv", sep=",") | pandas.read_csv |
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# flake8: noqa
from datetime import datetime
import csv
import os
import sys
import re
import nose
import platform
from multiprocessing.pool import ThreadPool
from numpy import nan
import numpy as np
from pandas.io.common import DtypeWarning
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
from pandas.compat import(
StringIO, BytesIO, PY3, range, long, lrange, lmap, u
)
from pandas.io.common import URLError
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextFileReader, TextParser)
import pandas.util.testing as tm
import pandas as pd
from pandas.compat import parse_date
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
import pandas.parser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def construct_dataframe(self, num_rows):
df = DataFrame(np.random.rand(num_rows, 5), columns=list('abcde'))
df['foo'] = 'foo'
df['bar'] = 'bar'
df['baz'] = 'baz'
df['date'] = pd.date_range('20000101 09:00:00',
periods=num_rows,
freq='s')
df['int'] = np.arange(num_rows, dtype='int64')
return df
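# Splits the file into num_tasks contiguous row blocks, reads them in a
# thread pool, re-applies the header columns and concatenates the chunks.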
def generate_multithread_dataframe(self, path, num_rows, num_tasks):
def reader(arg):
start, nrows = arg
if not start:
return pd.read_csv(path, index_col=0, header=0, nrows=nrows,
parse_dates=['date'])
return pd.read_csv(path,
index_col=0,
header=None,
skiprows=int(start) + 1,
nrows=nrows,
parse_dates=[9])
tasks = [
(num_rows * i / num_tasks,
num_rows / num_tasks) for i in range(num_tasks)
]
pool = ThreadPool(processes=num_tasks)
results = pool.map(reader, tasks)
header = results[0].columns
for r in results[1:]:
r.columns = header
final_dataframe = pd.concat(results)
return final_dataframe
def test_converters_type_must_be_dict(self):
with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
self.read_csv(StringIO(self.data1), converters=0)
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), decimal='')
def test_empty_thousands_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands='')
def test_multi_character_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands=',,')
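# keep_default_na=False disables the built-in NA string set, so only the
# explicitly supplied na_values are treated as missing.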
def test_empty_string(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
StringIO(data), na_values=['a'], keep_default_na=False)
xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
# GH4318, passing na_values=None and keep_default_na=False yields
# 'None' as a na_value
data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(
StringIO(data), keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['None', 'two', 'None', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
# it works!
read_csv(fname, index_col=0, parse_dates=True)
def test_dialect(self):
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = self.read_csv(StringIO(data), dialect=dia)
data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
exp = self.read_csv(StringIO(data))
exp.replace('a', '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(self):
data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
exp = DataFrame({
'fruit': ['apple', 'pear'],
'vegetable': ['brocolli', 'tomato']
})
dia = csv.register_dialect('mydialect', delimiter=':') # noqa
df = self.read_csv(StringIO(data), dialect='mydialect')
tm.assert_frame_equal(df, exp)
csv.unregister_dialect('mydialect')
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
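# thousands/decimal can be swapped (e.g. thousands='.', decimal=',') to
# parse European-style numbers such as 2.334,01.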
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
tm.assert_equal(expected.A.dtype, 'int64')
tm.assert_equal(expected.B.dtype, 'float')
tm.assert_equal(expected.C.dtype, 'float')
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_separator_date_conflict(self):
# Regression test for issue #4678: make sure thousands separator and
# date parsing do not conflict.
data = '06-02-2013;13:00;1-000.215'
expected = DataFrame(
[[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
columns=['Date', 2]
)
df = self.read_csv(StringIO(data), sep=';', thousands='-',
parse_dates={'Date': [0, 1]}, header=None)
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
tm.assertIsInstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# GH 8217
# series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
self.assertFalse(result._is_view)
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,Inf
d,-Inf
e,INF
f,-INF
g,INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
df = read_csv(StringIO(data), index_col=0)
tm.assert_almost_equal(df['A'].values, expected.values)
df = read_csv(StringIO(data), index_col=0, na_filter=False)
tm.assert_almost_equal(df['A'].values, expected.values)
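# parse_dates given as a dict maps a new column name to the positional
# columns whose string values are concatenated and parsed together.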
def test_multiple_date_col(self):
# Can use multiple date parsers
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def func(*date_cols):
return lib.try_parse_dates(parsers._concat_date_cols(date_cols))
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
prefix='X',
parse_dates={'nominal': [1, 2],
'actual': [1, 3]})
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'nominal'], d)
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
parse_dates={'nominal': [1, 2],
'actual': [1, 3]},
keep_date_col=True)
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
df = read_csv(StringIO(data), header=None,
prefix='X',
parse_dates=[[1, 2], [1, 3]])
self.assertIn('X1_X2', df)
self.assertIn('X1_X3', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'X1_X2'], d)
df = read_csv(StringIO(data), header=None,
parse_dates=[[1, 2], [1, 3]], keep_date_col=True)
self.assertIn('1_2', df)
self.assertIn('1_3', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = '''\
KORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
'''
df = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[1], index_col=1)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.index[0], d)
def test_multiple_date_cols_int_cast(self):
data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
"KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
"KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
"KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
"KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
"KORD,19990127, 23:00:00, 22:56:00, -0.5900")
date_spec = {'nominal': [1, 2], 'actual': [1, 3]}
import pandas.io.date_converters as conv
# it works!
df = self.read_csv(StringIO(data), header=None, parse_dates=date_spec,
date_parser=conv.parse_date_time)
self.assertIn('nominal', df)
def test_multiple_date_col_timestamp_parse(self):
data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25
05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25"""
result = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[[0, 1]], date_parser=Timestamp)
ex_val = Timestamp('05/31/2012 15:30:00.029')
self.assertEqual(result['0_1'][0], ex_val)
def test_single_line(self):
# GH 6607
# Test currently only valid with python engine because sep=None and
# delim_whitespace=False. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
'sep=None with delim_whitespace=False'):
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_multiple_date_cols_with_header(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
self.assertNotIsInstance(df.nominal[0], compat.string_types)
ts_data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def test_multiple_date_col_name_collision(self):
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
parse_dates={'ID': [1, 2]})
data = """\
date_NominalTime,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000""" # noqa
self.assertRaises(ValueError, self.read_csv, StringIO(data),
parse_dates=[[1, 2]])
def test_index_col_named(self):
no_header = """\
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
h = "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n"
data = h + no_header
rs = self.read_csv(StringIO(data), index_col='ID')
xp = self.read_csv(StringIO(data), header=0).set_index('ID')
tm.assert_frame_equal(rs, xp)
self.assertRaises(ValueError, self.read_csv, StringIO(no_header),
index_col='ID')
data = """\
1,2,3,4,hello
5,6,7,8,world
9,10,11,12,foo
"""
names = ['a', 'b', 'c', 'd', 'message']
xp = DataFrame({'a': [1, 5, 9], 'b': [2, 6, 10], 'c': [3, 7, 11],
'd': [4, 8, 12]},
index=Index(['hello', 'world', 'foo'], name='message'))
rs = self.read_csv(StringIO(data), names=names, index_col=['message'])
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
rs = self.read_csv(StringIO(data), names=names, index_col='message')
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
def test_usecols_index_col_False(self):
# Issue 9082
s = "a,b,c,d\n1,2,3,4\n5,6,7,8"
s_malformed = "a,b,c,d\n1,2,3,4,\n5,6,7,8,"
cols = ['a', 'c', 'd']
expected = DataFrame({'a': [1, 5], 'c': [3, 7], 'd': [4, 8]})
df = self.read_csv(StringIO(s), usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(s_malformed),
usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
def test_index_col_is_True(self):
# Issue 9798
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
index_col=True)
def test_converter_index_col_bug(self):
# 1835
data = "A;B\n1;2\n3;4"
rs = self.read_csv(StringIO(data), sep=';', index_col='A',
converters={'A': lambda x: x})
xp = DataFrame({'B': [2, 4]}, index=Index([1, 3], name='A'))
tm.assert_frame_equal(rs, xp)
self.assertEqual(rs.index.name, xp.index.name)
def test_date_parser_int_bug(self):
# #3071
log_file = StringIO(
'posix_timestamp,elapsed,sys,user,queries,query_time,rows,'
'accountid,userid,contactid,level,silo,method\n'
'1343103150,0.062353,0,4,6,0.01690,3,'
'12345,1,-1,3,invoice_InvoiceResource,search\n'
)
def f(posix_string):
return datetime.utcfromtimestamp(int(posix_string))
# it works!
read_csv(log_file, index_col=0, parse_dates=0, date_parser=f)
def test_multiple_skts_example(self):
data = "year, month, a, b\n 2001, 01, 0.0, 10.\n 2001, 02, 1.1, 11."
pass
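# Malformed rows (too many fields) should raise and report the offending
# line number in the message.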
def test_malformed(self):
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
try:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'): # XXX
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1, skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_passing_dtype(self):
# GH 6607
# Passing dtype is currently only supported by the C engine.
# Temporarily copied to TestCParser*.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
# empty frame
# GH12048
self.read_csv(StringIO('A,B'), dtype=str)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>"""
self.assertRaises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
self.assertEqual(len(df), 3)
def test_non_string_na_values(self):
# GH3611, na_values that are not a string are an issue
with tm.ensure_clean('__non_string_na_values__.csv') as path:
df = DataFrame({'A': [-999, 2, 3], 'B': [1.2, -999, 4.5]})
df.to_csv(path, sep=' ', index=False)
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result2, result3)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, result3)
tm.assert_frame_equal(result5, result3)
tm.assert_frame_equal(result6, result3)
tm.assert_frame_equal(result7, result3)
good_compare = result3
# with an odd float format, so we can't match the string 999.0
# exactly, but need float matching
df.to_csv(path, sep=' ', index=False, float_format='%.3f')
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, good_compare)
tm.assert_frame_equal(result2, good_compare)
tm.assert_frame_equal(result3, good_compare)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, good_compare)
tm.assert_frame_equal(result5, good_compare)
tm.assert_frame_equal(result6, good_compare)
tm.assert_frame_equal(result7, good_compare)
def test_default_na_values(self):
_NA_VALUES = set(['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN',
'#N/A', 'N/A', 'NA', '#NA', 'NULL', 'NaN',
'nan', '-NaN', '-nan', '#N/A N/A', ''])
self.assertEqual(_NA_VALUES, parsers._NA_VALUES)
nv = len(_NA_VALUES)
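# f(i, v) builds a CSV row whose i-th field is the NA string v and whose
# remaining fields are empty, so the parsed frame should be all-NaN.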
def f(i, v):
if i == 0:
buf = ''
elif i > 0:
buf = ''.join([','] * i)
buf = "{0}{1}".format(buf, v)
if i < nv - 1:
buf = "{0}{1}".format(buf, ''.join([','] * (nv - i - 1)))
return buf
data = StringIO('\n'.join([f(i, v) for i, v in enumerate(_NA_VALUES)]))
expected = DataFrame(np.nan, columns=range(nv), index=range(nv))
df = self.read_csv(data, header=None)
tm.assert_frame_equal(df, expected)
def test_custom_na_values(self):
data = """A,B,C
ignore,this,row
1,NA,3
-1.#IND,5,baz
7,8,NaN
"""
expected = [[1., nan, 3],
[nan, 5, nan],
[7, 8, nan]]
df = self.read_csv(StringIO(data), na_values=['baz'], skiprows=[1])
tm.assert_almost_equal(df.values, expected)
df2 = self.read_table(StringIO(data), sep=',', na_values=['baz'],
skiprows=[1])
tm.assert_almost_equal(df2.values, expected)
df3 = self.read_table(StringIO(data), sep=',', na_values='baz',
skiprows=[1])
tm.assert_almost_equal(df3.values, expected)
def test_nat_parse(self):
# GH 3062
df = DataFrame(dict({
'A': np.asarray(lrange(10), dtype='float64'),
'B': pd.Timestamp('20010101')}))
df.iloc[3:6, :] = np.nan
with tm.ensure_clean('__nat_parse_.csv') as path:
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
expected = Series(dict(A='float64', B='datetime64[ns]'))
tm.assert_series_equal(expected, result.dtypes)
# test with NaT for the nan_rep
# we don't have a method to specify the Datetime na_rep (it defaults
# to '')
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
def test_skiprows_bug(self):
# GH #505
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=lrange(6), header=None,
index_col=0, parse_dates=True)
data2 = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
tm.assert_frame_equal(data, data2)
def test_deep_skiprows(self):
# GH #4382
text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in range(10)])
condensed_text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in [0, 1, 2, 3, 4, 6, 8, 9]])
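# skiprows uses file line numbers (header is line 0), so skipping lines 6
# and 8 drops data rows i=5 and i=7, matching condensed_text.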
data = self.read_csv(StringIO(text), skiprows=[6, 8])
condensed_data = self.read_csv(StringIO(condensed_text))
tm.assert_frame_equal(data, condensed_data)
def test_skiprows_blank(self):
# GH 9832
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
def test_detect_string_na(self):
data = """A,B
foo,bar
NA,baz
NaN,nan
"""
expected = [['foo', 'bar'],
[nan, 'baz'],
[nan, nan]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
self.assert_numpy_array_equal(df.columns,
['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4'])
def test_string_nas(self):
data = """A,B,C
a,b,c
d,,f
,g,h
"""
result = self.read_csv(StringIO(data))
expected = DataFrame([['a', 'b', 'c'],
['d', np.nan, 'f'],
[np.nan, 'g', 'h']],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(result, expected)
def test_duplicate_columns(self):
for engine in ['python', 'c']:
data = """A,A,B,B,B
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
# check default behaviour
df = self.read_table(StringIO(data), sep=',', engine=engine)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=False)
self.assertEqual(list(df.columns), ['A', 'A', 'B', 'B', 'B'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=True)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
df = self.read_csv(StringIO(data))
# TODO
def test_csv_custom_parser(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
f = lambda x: datetime.strptime(x, '%Y%m%d')
df = self.read_csv(StringIO(data), date_parser=f)
expected = self.read_csv(StringIO(data), parse_dates=True)
tm.assert_frame_equal(df, expected)
def test_parse_dates_implicit_first_col(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
df = self.read_csv(StringIO(data), parse_dates=True)
expected = self.read_csv(StringIO(data), index_col=0, parse_dates=True)
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
tm.assert_frame_equal(df, expected)
def test_parse_dates_string(self):
data = """date,A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
rs = self.read_csv(
StringIO(data), index_col='date', parse_dates='date')
idx = date_range('1/1/2009', periods=3)
idx.name = 'date'
xp = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]}, idx)
tm.assert_frame_equal(rs, xp)
def test_yy_format(self):
data = """date,time,B,C
090131,0010,1,2
090228,1020,3,4
090331,0830,5,6
"""
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[['date', 'time']])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[[0, 1]])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
def test_parse_dates_column_list(self):
from pandas.core.datetools import to_datetime
data = '''date;destination;ventilationcode;unitcode;units;aux_date
01/01/2010;P;P;50;1;12/1/2011
01/01/2010;P;R;50;1;13/1/2011
15/01/2010;P;P;50;1;14/1/2011
01/05/2010;P;P;50;1;15/1/2011'''
expected = self.read_csv(StringIO(data), sep=";", index_col=lrange(4))
lev = expected.index.levels[0]
levels = list(expected.index.levels)
levels[0] = lev.to_datetime(dayfirst=True)
# hack to get this to work - remove for final test
levels[0].name = lev.name
expected.index.set_levels(levels, inplace=True)
expected['aux_date'] = to_datetime(expected['aux_date'],
dayfirst=True)
expected['aux_date'] = lmap(Timestamp, expected['aux_date'])
tm.assertIsInstance(expected['aux_date'][0], datetime)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=[0, 5], dayfirst=True)
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=['date', 'aux_date'], dayfirst=True)
tm.assert_frame_equal(df, expected)
def test_no_header(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df = self.read_table(StringIO(data), sep=',', header=None)
df_pref = self.read_table(StringIO(data), sep=',', prefix='X',
header=None)
names = ['foo', 'bar', 'baz', 'quux', 'panda']
df2 = self.read_table(StringIO(data), sep=',', names=names)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df.values, expected)
tm.assert_almost_equal(df.values, df2.values)
self.assert_numpy_array_equal(df_pref.columns,
['X0', 'X1', 'X2', 'X3', 'X4'])
self.assert_numpy_array_equal(df.columns, lrange(5))
self.assert_numpy_array_equal(df2.columns, names)
def test_no_header_prefix(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df_pref = self.read_table(StringIO(data), sep=',', prefix='Field',
header=None)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df_pref.values, expected)
self.assert_numpy_array_equal(df_pref.columns,
['Field0', 'Field1', 'Field2', 'Field3', 'Field4'])
def test_header_with_index_col(self):
data = """foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
names = ['A', 'B', 'C']
df = self.read_csv(StringIO(data), names=names)
self.assertEqual(names, ['A', 'B', 'C'])
values = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
expected = DataFrame(values, index=['foo', 'bar', 'baz'],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(df, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D'])
self.assertEqual(df.index.name, 'index')
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D', 'E'])
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.ix[:, ['A', 'B', 'C', 'D']
].values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_infer_compression(self):
# GH 9770
expected = self.read_csv(self.csv1, index_col=0, parse_dates=True)
inputs = [self.csv1, self.csv1 + '.gz',
self.csv1 + '.bz2', open(self.csv1)]
for f in inputs:
df = self.read_csv(f, index_col=0, parse_dates=True,
compression='infer')
tm.assert_frame_equal(expected, df)
inputs[3].close()
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = read_table(fin, sep=";", encoding="utf-8", header=None)
tm.assertIsInstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
self.assertRaises(Exception, self.read_csv, StringIO(data))
def test_read_table_duplicate_index(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index('index',
verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_table_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# it works!
result = self.read_csv(StringIO(data))
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.float64)
self.assertEqual(data['B'].dtype, np.int64)
def test_infer_index_col(self):
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
data = self.read_csv(StringIO(data))
self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))
def test_read_nrows(self):
df = self.read_csv(StringIO(self.data1), nrows=3)
expected = self.read_csv(StringIO(self.data1))[:3]
tm.assert_frame_equal(df, expected)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_read_chunksize_named(self):
reader = self.read_csv(
StringIO(self.data1), index_col='index', chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col='index')
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_get_chunk_passed_chunksize(self):
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
result = self.read_csv(StringIO(data), chunksize=2)
piece = result.get_chunk()
self.assertEqual(len(piece), 2)
def test_read_text_list(self):
data = """A,B,C\nfoo,1,2,3\nbar,4,5,6"""
as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar',
'4', '5', '6']]
df = self.read_csv(StringIO(data), index_col=0)
parser = TextParser(as_list, index_col=0, chunksize=2)
chunk = parser.read(None)
tm.assert_frame_equal(chunk, df)
def test_iterator(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
# test bad parameter (skip_footer)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skip_footer=True)
self.assertRaises(ValueError, reader.read, 3)
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
tm.assertIsInstance(treader, TextFileReader)
# stopping iteration when on chunksize is specified, GH 3967
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
self.assertEqual(len(result), 3)
tm.assert_frame_equal(pd.concat(result), expected)
def test_header_not_first_line(self):
data = """got,to,ignore,this,line
got,to,ignore,this,line
index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
data2 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
df = self.read_csv(StringIO(data), header=2, index_col=0)
expected = self.read_csv(StringIO(data2), header=0, index_col=0)
tm.assert_frame_equal(df, expected)
def test_header_multi_index(self):
expected = tm.makeCustomDataframe(
5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
data = """\
C0,,C_l0_g0,C_l0_g1,C_l0_g2
C1,,C_l1_g0,C_l1_g1,C_l1_g2
C2,,C_l2_g0,C_l2_g1,C_l2_g2
C3,,C_l3_g0,C_l3_g1,C_l3_g2
R0,R1,,,
R_l0_g0,R_l1_g0,R0C0,R0C1,R0C2
R_l0_g1,R_l1_g1,R1C0,R1C1,R1C2
R_l0_g2,R_l1_g2,R2C0,R2C1,R2C2
R_l0_g3,R_l1_g3,R3C0,R3C1,R3C2
R_l0_g4,R_l1_g4,R4C0,R4C1,R4C2
"""
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
# skipping lines in the header
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
#### invalid options ####
# no as_recarray
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], as_recarray=True, tupleize_cols=False)
# names
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], names=['foo', 'bar'], tupleize_cols=False)
# usecols
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], usecols=['foo', 'bar'], tupleize_cols=False)
# non-numeric index_col
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=['foo', 'bar'], tupleize_cols=False)
def test_header_multiindex_common_format(self):
df = DataFrame([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]],
index=['one', 'two'],
columns=MultiIndex.from_tuples([('a', 'q'), ('a', 'r'), ('a', 's'),
('b', 't'), ('c', 'u'), ('c', 'v')]))
# to_csv
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
,,,,,,
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common, no index_col
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=None)
tm.assert_frame_equal(df.reset_index(drop=True), result)
# malformed case 1
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[u('a'), u('q')]))
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# malformed case 2
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# mi on columns and index (malformed)
expected = DataFrame(np.array([[3, 4, 5, 6],
[9, 10, 11, 12]], dtype='int64'),
index=MultiIndex(levels=[[1, 7], [2, 8]],
labels=[[0, 1], [0, 1]]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 1, 2, 2],
[0, 1, 2, 3]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1])
tm.assert_frame_equal(expected, result)
def test_pass_names_with_index(self):
lines = self.data1.split('\n')
no_header = '\n'.join(lines[1:])
# regular index
names = ['index', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=0, names=names)
expected = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(df, expected)
# multi index
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['index1', 'index2', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), index_col=['index1', 'index2'])
tm.assert_frame_equal(df, expected)
def test_multi_index_no_level_names(self):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
data2 = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
header=None, names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected, check_names=False)
# 2 implicit first cols
df2 = self.read_csv(StringIO(data2))
tm.assert_frame_equal(df2, df)
# reverse order of index
df = self.read_csv(StringIO(no_header), index_col=[1, 0], names=names,
header=None)
expected = self.read_csv(StringIO(data), index_col=[1, 0])
tm.assert_frame_equal(df, expected, check_names=False)
def test_multi_index_parse_dates(self):
data = """index1,index2,A,B,C
20090101,one,a,1,2
20090101,two,b,3,4
20090101,three,c,4,5
20090102,one,a,1,2
20090102,two,b,3,4
20090102,three,c,4,5
20090103,one,a,1,2
20090103,two,b,3,4
20090103,three,c,4,5
"""
df = self.read_csv(StringIO(data), index_col=[0, 1], parse_dates=True)
self.assertIsInstance(df.index.levels[0][0],
(datetime, np.datetime64, Timestamp))
# specify columns out of order!
df2 = self.read_csv(StringIO(data), index_col=[1, 0], parse_dates=True)
self.assertIsInstance(df2.index.levels[1][0],
(datetime, np.datetime64, Timestamp))
def test_skip_footer(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
data = """A,B,C
1,2,3
4,5,6
7,8,9
want to skip this
also also skip this
"""
result = self.read_csv(StringIO(data), skip_footer=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), nrows=3)
tm.assert_frame_equal(result, expected)
# skipfooter alias
result = read_csv(StringIO(data), skipfooter=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
def test_no_unnamed_index(self):
data = """ id c0 c1 c2
0 1 0 a b
1 2 0 c d
2 2 2 e f
"""
df = self.read_table(StringIO(data), sep=' ')
self.assertIsNone(df.index.name)
def test_converters(self):
data = """A,B,C,D
a,1,2,01/01/2009
b,3,4,01/02/2009
c,4,5,01/03/2009
"""
from pandas.compat import parse_date
result = self.read_csv(StringIO(data), converters={'D': parse_date})
result2 = self.read_csv(StringIO(data), converters={3: parse_date})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(parse_date)
tm.assertIsInstance(result['D'][0], (datetime, Timestamp))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# produce integer
converter = lambda x: int(x.split('/')[2])
result = self.read_csv(StringIO(data), converters={'D': converter})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(converter)
tm.assert_frame_equal(result, expected)
def test_converters_no_implicit_conv(self):
# GH2184
data = """000102,1.2,A\n001245,2,B"""
f = lambda x: x.strip()
converter = {0: f}
df = self.read_csv(StringIO(data), header=None, converters=converter)
self.assertEqual(df[0].dtype, object)
def test_converters_euro_decimal_format(self):
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
self.assertEqual(df2['Number2'].dtype, float)
self.assertEqual(df2['Number3'].dtype, float)
def test_converter_return_string_bug(self):
# GH #583
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
def test_read_table_buglet_4x_multiindex(self):
# GH 6607
# Parsing multi-level index currently causes an error in the C parser.
# Temporarily copied to TestPythonParser.
# Here test that CParserError is raised:
with tm.assertRaises(pandas.parser.CParserError):
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
# it works!
df = self.read_table(StringIO(text), sep='\s+')
self.assertEqual(df.index.names, ('one', 'two', 'three', 'four'))
def test_line_comment(self):
data = """# empty
A,B,C
1,2.,4.#hello world
#ignore this line
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows(self):
data = """# empty
random line
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# this should ignore the first four lines (including comments)
df = self.read_csv(StringIO(data), comment='#', skiprows=4)
tm.assert_almost_equal(df.values, expected)
def test_comment_header(self):
data = """# empty
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# header should begin at the second non-comment line
df = self.read_csv(StringIO(data), comment='#', header=1)
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows_header(self):
data = """# empty
# second empty line
# third empty line
X,Y,Z
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# skiprows should skip the first 4 lines (including comments), while
# header should start from the second non-commented line starting
# with line 5
df = self.read_csv(StringIO(data), comment='#', skiprows=4, header=1)
tm.assert_almost_equal(df.values, expected)
def test_read_csv_parse_simple_list(self):
text = """foo
bar baz
qux foo
foo
bar"""
df = read_csv(StringIO(text), header=None)
expected = DataFrame({0: ['foo', 'bar baz', 'qux foo',
'foo', 'bar']})
tm.assert_frame_equal(df, expected)
def test_parse_dates_custom_euroformat(self):
text = """foo,bar,baz
31/01/2010,1,2
01/02/2010,1,NA
02/02/2010,1,2
"""
parser = lambda d: parse_date(d, dayfirst=True)
df = self.read_csv(StringIO(text),
names=['time', 'Q', 'NTU'], header=0,
index_col=0, parse_dates=True,
date_parser=parser, na_values=['NA'])
exp_index = Index([datetime(2010, 1, 31), datetime(2010, 2, 1),
datetime(2010, 2, 2)], name='time')
expected = DataFrame({'Q': [1, 1, 1], 'NTU': [2, np.nan, 2]},
index=exp_index, columns=['Q', 'NTU'])
tm.assert_frame_equal(df, expected)
parser = lambda d: parse_date(d, day_first=True)
self.assertRaises(Exception, self.read_csv,
StringIO(text), skiprows=[0],
names=['time', 'Q', 'NTU'], index_col=0,
parse_dates=True, date_parser=parser,
na_values=['NA'])
def test_na_value_dict(self):
data = """A,B,C
foo,bar,NA
bar,foo,foo
foo,bar,NA
bar,foo,foo"""
df = self.read_csv(StringIO(data),
na_values={'A': ['foo'], 'B': ['bar']})
expected = DataFrame({'A': [np.nan, 'bar', np.nan, 'bar'],
'B': [np.nan, 'foo', np.nan, 'foo'],
'C': [np.nan, 'foo', np.nan, 'foo']})
tm.assert_frame_equal(df, expected)
data = """\
a,b,c,d
0,NA,1,5
"""
xp = DataFrame({'b': [np.nan], 'c': [1], 'd': [5]}, index=[0])
xp.index.name = 'a'
df = self.read_csv(StringIO(data), na_values={}, index_col=0)
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=[0, 2])
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=['a', 'c'])
tm.assert_frame_equal(df, xp)
@tm.network
def test_url(self):
# HTTP(S)
url = ('https://raw.github.com/pydata/pandas/master/'
'pandas/io/tests/data/salary.table')
url_table = self.read_table(url)
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
local_table = self.read_table(localtable)
tm.assert_frame_equal(url_table, local_table)
# TODO: ftp testing
@slow
def test_file(self):
# FILE
if sys.version_info[:2] < (2, 6):
raise nose.SkipTest("file:// not supported with Python < 2.6")
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
local_table = self.read_table(localtable)
try:
url_table = self.read_table('file://localhost/' + localtable)
except URLError:
# fails on some systems
raise nose.SkipTest("failing on %s" %
' '.join(platform.uname()).strip())
tm.assert_frame_equal(url_table, local_table)
def test_parse_tz_aware(self):
import pytz
# #1693
data = StringIO("Date,x\n2012-06-13T01:39:00Z,0.5")
# it works
result = read_csv(data, index_col=0, parse_dates=True)
stamp = result.index[0]
self.assertEqual(stamp.minute, 39)
try:
self.assertIs(result.index.tz, pytz.utc)
except AssertionError: # hello Yaroslav
arr = result.index.to_pydatetime()
result = tools.to_datetime(arr, utc=True)[0]
self.assertEqual(stamp.minute, result.minute)
self.assertEqual(stamp.hour, result.hour)
self.assertEqual(stamp.day, result.day)
def test_multiple_date_cols_index(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
xp = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col='nominal')
tm.assert_frame_equal(xp.set_index('nominal'), df)
df2 = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col=0)
tm.assert_frame_equal(df2, df)
df3 = self.read_csv(StringIO(data), parse_dates=[[1, 2]], index_col=0)
tm.assert_frame_equal(df3, df, check_names=False)
def test_multiple_date_cols_chunked(self):
df = self.read_csv(StringIO(self.ts_data), parse_dates={
'nominal': [1, 2]}, index_col='nominal')
reader = self.read_csv(StringIO(self.ts_data), parse_dates={'nominal':
[1, 2]}, index_col='nominal', chunksize=2)
chunks = list(reader)
self.assertNotIn('nominalTime', df)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_multiple_date_col_named_components(self):
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col='nominal')
colspec = {'nominal': ['date', 'nominalTime']}
df = self.read_csv(StringIO(self.ts_data), parse_dates=colspec,
index_col='nominal')
tm.assert_frame_equal(df, xp)
def test_multiple_date_col_multiple_index(self):
df = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col=['nominal', 'ID'])
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]})
tm.assert_frame_equal(xp.set_index(['nominal', 'ID']), df)
def test_comment(self):
data = """A,B,C
1,2.,4.#hello world
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
df = self.read_table(StringIO(data), sep=',', comment='#',
na_values=['NaN'])
tm.assert_almost_equal(df.values, expected)
def test_bool_na_values(self):
data = """A,B,C
True,False,True
NA,True,False
False,NA,True"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': np.array([True, nan, False], dtype=object),
'B': np.array([False, True, nan], dtype=object),
'C': [True, False, True]})
tm.assert_frame_equal(result, expected)
def test_nonexistent_path(self):
# don't segfault pls #2428
path = '%s.csv' % tm.rands(10)
self.assertRaises(Exception, self.read_csv, path)
def test_missing_trailing_delimiters(self):
data = """A,B,C,D
1,2,3,4
1,3,3,
1,4,5"""
result = self.read_csv(StringIO(data))
self.assertTrue(result['D'].isnull()[1:].all())
def test_skipinitialspace(self):
s = ('"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, '
'1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, '
'314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, '
'70.06056, 344.98370, 1, 1, -0.689265, -0.692787, '
'0.212036, 14.7674, 41.605, -9999.0, -9999.0, '
'-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128')
sfile = StringIO(s)
# it's 33 columns
result = self.read_csv(sfile, names=lrange(33), na_values=['-9999.0'],
header=None, skipinitialspace=True)
self.assertTrue(pd.isnull(result.ix[0, 29]))
def test_utf16_bom_skiprows(self):
# #2298
data = u("""skip this
skip this too
A\tB\tC
1\t2\t3
4\t5\t6""")
data2 = u("""skip this
skip this too
A,B,C
1,2,3
4,5,6""")
path = '__%s__.csv' % tm.rands(10)
with tm.ensure_clean(path) as path:
for sep, dat in [('\t', data), (',', data2)]:
for enc in ['utf-16', 'utf-16le', 'utf-16be']:
bytes = dat.encode(enc)
with open(path, 'wb') as f:
f.write(bytes)
s = BytesIO(dat.encode('utf-8'))
if compat.PY3:
# somewhat False since the code never sees bytes
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
result = self.read_csv(path, encoding=enc, skiprows=2,
sep=sep)
expected = self.read_csv(s, encoding='utf-8', skiprows=2,
sep=sep)
tm.assert_frame_equal(result, expected)
def test_utf16_example(self):
path = tm.get_data_path('utf16_ex.txt')
# it works! and is the right length
result = self.read_table(path, encoding='utf-16')
self.assertEqual(len(result), 50)
if not compat.PY3:
buf = BytesIO(open(path, 'rb').read())
result = self.read_table(buf, encoding='utf-16')
self.assertEqual(len(result), 50)
def test_converters_corner_with_nas(self):
# skip aberration observed on Win64 Python 3.2.2
if hash(np.int64(-1)) != -2:
raise nose.SkipTest("skipping because of windows hash on Python"
" 3.2.2")
csv = """id,score,days
1,2,12
2,2-5,
3,,14+
4,6-12,2"""
def convert_days(x):
x = x.strip()
if not x:
return np.nan
is_plus = x.endswith('+')
if is_plus:
x = int(x[:-1]) + 1
else:
x = int(x)
return x
def convert_days_sentinel(x):
x = x.strip()
if not x:
return np.nan
is_plus = x.endswith('+')
if is_plus:
x = int(x[:-1]) + 1
else:
x = int(x)
return x
def convert_score(x):
x = x.strip()
if not x:
return np.nan
if x.find('-') > 0:
valmin, valmax = lmap(int, x.split('-'))
val = 0.5 * (valmin + valmax)
else:
val = float(x)
return val
fh = StringIO(csv)
result = self.read_csv(fh, converters={'score': convert_score,
'days': convert_days},
na_values=['', None])
self.assertTrue(pd.isnull(result['days'][1]))
fh = StringIO(csv)
result2 = self.read_csv(fh, converters={'score': convert_score,
'days': convert_days_sentinel},
na_values=['', None])
tm.assert_frame_equal(result, result2)
def test_unicode_encoding(self):
pth = tm.get_data_path('unicode_series.csv')
result = self.read_csv(pth, header=None, encoding='latin-1')
result = result.set_index(0)
got = result[1][1632]
expected = u('\xc1 k\xf6ldum klaka (Cold Fever) (1994)')
self.assertEqual(got, expected)
def test_trailing_delimiters(self):
# #2442. grumble grumble
data = """A,B,C
1,2,3,
4,5,6,
7,8,9,"""
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame({'A': [1, 4, 7], 'B': [2, 5, 8],
'C': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_escapechar(self):
# http://stackoverflow.com/questions/13824840/feature-request-for-
# pandas-read-csv
data = '''SEARCH_TERM,ACTUAL_URL
"bra tv bord","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals serie","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"'''
result = self.read_csv(StringIO(data), escapechar='\\',
quotechar='"', encoding='utf-8')
self.assertEqual(result['SEARCH_TERM'][2],
'SLAGBORD, "Bergslagen", IKEA:s 1700-tals serie')
self.assertTrue(np.array_equal(result.columns,
['SEARCH_TERM', 'ACTUAL_URL']))
def test_header_names_backward_compat(self):
# #2539
data = '1,2,3\n4,5,6'
result = self.read_csv(StringIO(data), names=['a', 'b', 'c'])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
tm.assert_frame_equal(result, expected)
data2 = 'foo,bar,baz\n' + data
result = self.read_csv(StringIO(data2), names=['a', 'b', 'c'],
header=0)
tm.assert_frame_equal(result, expected)
def test_int64_min_issues(self):
# #2599
data = 'A,B\n0,0\n0,'
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': [0, 0], 'B': [0, np.nan]})
tm.assert_frame_equal(result, expected)
def test_parse_integers_above_fp_precision(self):
data = """Numbers
17007000002000191
17007000002000191
17007000002000191
17007000002000191
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000194"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'Numbers': [17007000002000191,
17007000002000191,
17007000002000191,
17007000002000191,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000194]})
self.assertTrue(np.array_equal(result['Numbers'], expected['Numbers']))
def test_usecols_index_col_conflict(self):
# Issue 4201 Test that index_col as integer reflects usecols
data = """SecId,Time,Price,P2,P3
10000,2013-5-11,100,10,1
500,2013-5-12,101,11,1
"""
expected = DataFrame({'Price': [100, 101]}, index=[
datetime(2013, 5, 11), datetime(2013, 5, 12)])
expected.index.name = 'Time'
df = self.read_csv(StringIO(data), usecols=[
'Time', 'Price'], parse_dates=True, index_col=0)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
'Time', 'Price'], parse_dates=True, index_col='Time')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
1, 2], parse_dates=True, index_col='Time')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
1, 2], parse_dates=True, index_col=0)
tm.assert_frame_equal(expected, df)
expected = DataFrame(
{'P3': [1, 1], 'Price': (100, 101), 'P2': (10, 11)})
expected = expected.set_index(['Price', 'P2'])
df = self.read_csv(StringIO(data), usecols=[
'Price', 'P2', 'P3'], parse_dates=True, index_col=['Price', 'P2'])
tm.assert_frame_equal(expected, df)
def test_chunks_have_consistent_numerical_type(self):
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ["1.0", "2.0"] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
# Assert that types were coerced.
self.assertTrue(type(df.a[0]) is np.float64)
self.assertEqual(df.a.dtype, np.float)
def test_warn_if_chunks_have_mismatched_type(self):
# See test in TestCParserLowMemory.
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ['a', 'b'] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
self.assertEqual(df.a.dtype, np.object)
def test_usecols(self):
data = """\
a,b,c
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), usecols=(1, 2))
result2 = self.read_csv(StringIO(data), usecols=('b', 'c'))
exp = self.read_csv(StringIO(data))
self.assertEqual(len(result.columns), 2)
self.assertTrue((result['b'] == exp['b']).all())
self.assertTrue((result['c'] == exp['c']).all())
tm.assert_frame_equal(result, result2)
result = self.read_csv(StringIO(data), usecols=[1, 2], header=0,
names=['foo', 'bar'])
expected = self.read_csv(StringIO(data), usecols=[1, 2])
expected.columns = ['foo', 'bar']
tm.assert_frame_equal(result, expected)
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), names=['b', 'c'],
header=None, usecols=[1, 2])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['b', 'c']]
tm.assert_frame_equal(result, expected)
result2 = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None, usecols=['b', 'c'])
tm.assert_frame_equal(result2, result)
# 5766
result = self.read_csv(StringIO(data), names=['a', 'b'],
header=None, usecols=[0, 1])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['a', 'b']]
tm.assert_frame_equal(result, expected)
# length conflict, passed names and usecols disagree
self.assertRaises(ValueError, self.read_csv, StringIO(data),
names=['a', 'b'], usecols=[1], header=None)
def test_integer_overflow_bug(self):
# #2601
data = "65248E10 11\n55555E55 22\n"
result = self.read_csv(StringIO(data), header=None, sep=' ')
self.assertTrue(result[0].dtype == np.float64)
result = self.read_csv(StringIO(data), header=None, sep='\s+')
self.assertTrue(result[0].dtype == np.float64)
def test_catch_too_many_names(self):
# Issue 5156
data = """\
1,2,3
4,,6
7,8,9
10,11,12\n"""
tm.assertRaises(Exception, read_csv, StringIO(data),
header=0, names=['a', 'b', 'c', 'd'])
def test_ignore_leading_whitespace(self):
# GH 6607, GH 3374
data = ' a b c\n 1 2 3\n 4 5 6\n 7 8 9'
result = self.read_table(StringIO(data), sep='\s+')
expected = DataFrame({'a': [1, 4, 7], 'b': [2, 5, 8], 'c': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_nrows_and_chunksize_raises_notimplemented(self):
data = 'a b c'
self.assertRaises(NotImplementedError, self.read_csv, StringIO(data),
nrows=10, chunksize=5)
def test_single_char_leading_whitespace(self):
# GH 9710
data = """\
MyColumn
a
b
a
b\n"""
expected = DataFrame({'MyColumn': list('abab')})
result = self.read_csv(StringIO(data), skipinitialspace=True)
tm.assert_frame_equal(result, expected)
def test_chunk_begins_with_newline_whitespace(self):
# GH 10022
data = '\n hello\nworld\n'
result = self.read_csv(StringIO(data), header=None)
self.assertEqual(len(result), 2)
# GH 9735
chunk1 = 'a' * (1024 * 256 - 2) + '\na'
chunk2 = '\n a'
result = pd.read_csv(StringIO(chunk1 + chunk2), header=None)
expected = pd.DataFrame(['a' * (1024 * 256 - 2), 'a', ' a'])
tm.assert_frame_equal(result, expected)
def test_empty_with_index(self):
# GH 10184
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=0)
expected = DataFrame([], columns=['y'], index=Index([], name='x'))
tm.assert_frame_equal(result, expected)
    def test_empty_with_multiindex(self):
# GH 10467
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=['x', 'y'])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_reversed_multiindex(self):
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=[1, 0])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_index_col_scenarios(self):
data = 'x,y,z'
# None, no index
index_col, expected = None, DataFrame([], columns=list('xyz')),
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# False, no index
index_col, expected = False, DataFrame([], columns=list('xyz')),
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# int, first column
index_col, expected = 0, DataFrame(
[], columns=['y', 'z'], index=Index([], name='x'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# int, not first column
index_col, expected = 1, DataFrame(
[], columns=['x', 'z'], index=Index([], name='y'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# str, first column
index_col, expected = 'x', DataFrame(
[], columns=['y', 'z'], index=Index([], name='x'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# str, not the first column
index_col, expected = 'y', DataFrame(
[], columns=['x', 'z'], index=Index([], name='y'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# list of int
index_col, expected = [0, 1], DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of str
index_col = ['x', 'y']
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of int, reversed sequence
index_col = [1, 0]
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of str, reversed sequence
index_col = ['y', 'x']
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
def test_empty_with_index_col_false(self):
# GH 10413
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame([], columns=['x', 'y'])
tm.assert_frame_equal(result, expected)
def test_float_parser(self):
# GH 9565
data = '45e-1,4.5,45.,inf,-inf'
result = self.read_csv(StringIO(data), header=None)
expected = pd.DataFrame([[float(s) for s in data.split(',')]])
tm.assert_frame_equal(result, expected)
def test_int64_overflow(self):
data = """ID
00013007854817840016671868
00013007854817840016749251
00013007854817840016754630
00013007854817840016781876
00013007854817840017028824
00013007854817840017963235
00013007854817840018860166"""
result = self.read_csv(StringIO(data))
self.assertTrue(result['ID'].dtype == object)
self.assertRaises((OverflowError, pandas.parser.OverflowError),
self.read_csv, StringIO(data),
converters={'ID': np.int64})
# Just inside int64 range: parse as integer
i_max = np.iinfo(np.int64).max
i_min = np.iinfo(np.int64).min
for x in [i_max, i_min]:
result = pd.read_csv(StringIO(str(x)), header=None)
expected = pd.DataFrame([x])
tm.assert_frame_equal(result, expected)
# Just outside int64 range: parse as string
too_big = i_max + 1
too_small = i_min - 1
for x in [too_big, too_small]:
result = pd.read_csv(StringIO(str(x)), header=None)
expected = pd.DataFrame([str(x)])
tm.assert_frame_equal(result, expected)
def test_empty_with_nrows_chunksize(self):
# GH 9535
expected = pd.DataFrame([], columns=['foo', 'bar'])
result = self.read_csv(StringIO('foo,bar\n'), nrows=10)
tm.assert_frame_equal(result, expected)
result = next(iter(pd.read_csv(StringIO('foo,bar\n'), chunksize=10)))
tm.assert_frame_equal(result, expected)
result = pd.read_csv(StringIO('foo,bar\n'), nrows=10, as_recarray=True)
result = pd.DataFrame(result[2], columns=result[1], index=result[0])
tm.assert_frame_equal(pd.DataFrame.from_records(
result), expected, check_index_type=False)
result = next(
iter(pd.read_csv(StringIO('foo,bar\n'), chunksize=10, as_recarray=True)))
result = pd.DataFrame(result[2], columns=result[1], index=result[0])
tm.assert_frame_equal(pd.DataFrame.from_records(
result), expected, check_index_type=False)
def test_eof_states(self):
# GH 10728 and 10548
# With skip_blank_lines = True
expected = pd.DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
# GH 10728
# WHITESPACE_LINE
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# GH 10548
# EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# EAT_CRNL_NOP
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# EAT_COMMENT
data = 'a,b,c\n4,5,6#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# SKIP_LINE
data = 'a,b,c\n4,5,6\nskipme'
result = self.read_csv(StringIO(data), skiprows=[2])
tm.assert_frame_equal(result, expected)
# With skip_blank_lines = False
# EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(
StringIO(data), comment='#', skip_blank_lines=False)
expected = pd.DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# IN_FIELD
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = pd.DataFrame(
[['4', 5, 6], [' ', None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# EAT_CRNL
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = pd.DataFrame(
[[4, 5, 6], [None, None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# Should produce exceptions
# ESCAPED_CHAR
data = "a,b,c\n4,5,6\n\\"
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# ESCAPE_IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"\\'
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"'
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
class TestPythonParser(ParserTests, tm.TestCase):
def test_negative_skipfooter_raises(self):
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
with tm.assertRaisesRegexp(ValueError,
'skip footer cannot be negative'):
df = self.read_csv(StringIO(text), skipfooter=-1)
def read_csv(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'python'
return read_csv(*args, **kwds)
def read_table(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'python'
return read_table(*args, **kwds)
def test_sniff_delimiter(self):
text = """index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
"""
data = self.read_csv(StringIO(text), index_col=0, sep=None)
self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))
data2 = self.read_csv(StringIO(text), index_col=0, delimiter='|')
tm.assert_frame_equal(data, data2)
text = """ignore this
ignore this too
index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
"""
data3 = self.read_csv(StringIO(text), index_col=0,
sep=None, skiprows=2)
tm.assert_frame_equal(data, data3)
text = u("""ignore this
ignore this too
index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
""").encode('utf-8')
s = BytesIO(text)
if compat.PY3:
# somewhat False since the code never sees bytes
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
data4 = self.read_csv(s, index_col=0, sep=None, skiprows=2,
encoding='utf-8')
tm.assert_frame_equal(data, data4)
def test_regex_separator(self):
data = """ A B C D
a 1 2 3 4
b 1 2 3 4
c 1 2 3 4
"""
df = self.read_table(StringIO(data), sep='\s+')
expected = self.read_csv(StringIO(re.sub('[ ]+', ',', data)),
index_col=0)
self.assertIsNone(expected.index.name)
tm.assert_frame_equal(df, expected)
def test_1000_fwf(self):
data = """
1 2,334.0 5
10 13 10.
"""
expected = [[1, 2334., 5],
[10, 13, 10]]
df = read_fwf(StringIO(data), colspecs=[(0, 3), (3, 11), (12, 16)],
thousands=',')
tm.assert_almost_equal(df.values, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_comment_fwf(self):
data = """
1 2. 4 #hello world
5 NaN 10.0
"""
expected = [[1, 2., 4],
[5, np.nan, 10.]]
df = read_fwf(StringIO(data), colspecs=[(0, 3), (4, 9), (9, 25)],
comment='#')
tm.assert_almost_equal(df.values, expected)
def test_fwf(self):
data_expected = """\
2011,58,360.242940,149.910199,11950.7
2011,59,444.953632,166.985655,11788.4
2011,60,364.136849,183.628767,11806.2
2011,61,413.836124,184.375703,11916.8
2011,62,502.953953,173.237159,12468.3
"""
expected = self.read_csv(StringIO(data_expected), header=None)
data1 = """\
201158 360.242940 149.910199 11950.7
201159 444.953632 166.985655 11788.4
201160 364.136849 183.628767 11806.2
201161 413.836124 184.375703 11916.8
201162 502.953953 173.237159 12468.3
"""
colspecs = [(0, 4), (4, 8), (8, 20), (21, 33), (34, 43)]
df = read_fwf(StringIO(data1), colspecs=colspecs, header=None)
tm.assert_frame_equal(df, expected)
data2 = """\
2011 58 360.242940 149.910199 11950.7
2011 59 444.953632 166.985655 11788.4
2011 60 364.136849 183.628767 11806.2
2011 61 413.836124 184.375703 11916.8
2011 62 502.953953 173.237159 12468.3
"""
df = read_fwf(StringIO(data2), widths=[5, 5, 13, 13, 7], header=None)
tm.assert_frame_equal(df, expected)
# From <NAME>: apparently some non-space filler characters can
# be seen, this is supported by specifying the 'delimiter' character:
# http://publib.boulder.ibm.com/infocenter/dmndhelp/v6r1mx/index.jsp?topic=/com.ibm.wbit.612.help.config.doc/topics/rfixwidth.html
data3 = """\
201158~~~~360.242940~~~149.910199~~~11950.7
201159~~~~444.953632~~~166.985655~~~11788.4
201160~~~~364.136849~~~183.628767~~~11806.2
201161~~~~413.836124~~~184.375703~~~11916.8
201162~~~~502.953953~~~173.237159~~~12468.3
"""
df = read_fwf(
StringIO(data3), colspecs=colspecs, delimiter='~', header=None)
tm.assert_frame_equal(df, expected)
with tm.assertRaisesRegexp(ValueError, "must specify only one of"):
read_fwf(StringIO(data3), colspecs=colspecs, widths=[6, 10, 10, 7])
with tm.assertRaisesRegexp(ValueError, "Must specify either"):
read_fwf(StringIO(data3), colspecs=None, widths=None)
def test_fwf_colspecs_is_list_or_tuple(self):
with tm.assertRaisesRegexp(TypeError,
'column specifications must be a list or '
'tuple.+'):
pd.io.parsers.FixedWidthReader(StringIO(self.data1),
{'a': 1}, ',', '#')
def test_fwf_colspecs_is_list_or_tuple_of_two_element_tuples(self):
with tm.assertRaisesRegexp(TypeError,
'Each column specification must be.+'):
read_fwf(StringIO(self.data1), [('a', 1)])
def test_fwf_colspecs_None(self):
# GH 7079
data = """\
123456
456789
"""
colspecs = [(0, 3), (3, None)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
        expected = DataFrame([[123, 456], [456, 789]])
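        # Hedged completion: this snippet ends at a dataset-row boundary; the
        # natural closing assertion, mirroring the other read_fwf tests above, is:
        tm.assert_frame_equal(result, expected)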
import os
import logging
import click
import pandas as pd
from bto_sbf_webscraper.scraper import (
get_available_flats,
scrape,
)
from . import __version__
os.environ["WDM_LOG_LEVEL"] = str(logging.WARNING)
@click.command()
@click.option(
"--selection_type",
"-s",
default="BTO",
help="Choose between BTO / SBF / OBF",
show_default=True,
)
@click.option(
"--launch_date",
"-l",
help="Choose your launch date",
show_default=True,
)
@click.option(
"--town",
"-t",
help="Choose your town",
show_default=True,
)
@click.option(
"--flat_type",
"-f",
help="Choose your flat type (2, 3, 4, 5, All)",
show_default=True,
)
@click.version_option(version=__version__)
def main(selection_type, launch_date, town, flat_type):
"""Test Project"""
if selection_type and (not launch_date and not town and not flat_type):
flats_available = get_available_flats(selection_type)
print_flats(flats_available)
if flats_available:
launch_date = get_launch_date(flats_available)
flat_type = get_flat_types(launch_date, flats_available)
town = get_town(launch_date, flat_type, flats_available)
data = scrape(selection_type, launch_date, flat_type, town)
file_name = f"{selection_type}_{launch_date}_{flat_type if flat_type else 'All'} Room_{town if town else 'All'}.csv".replace(
"/", "-"
)
    df = pd.DataFrame(data)
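    # Hedged completion (assumption): 'df' above is a hypothetical name for the
    # frame the original row constructed; a likely intent is to persist the
    # scraped rows to the CSV file name built earlier. 'index=False' and the
    # confirmation message are illustrative choices, not from the original code.
    df.to_csv(file_name, index=False)
    click.echo(f"Saved {len(df)} rows to {file_name}")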
import pandas as pd
import numpy as np
from random import uniform,randint,sample
import datetime
import math
temperature_percage = 70
frottement_foret = 0.5
vitesse_percage = 1500
temperature_eau_refroidissement = 10
long_dataset = 28800*20
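# The constants above define the nominal operating point of a simulated drilling
# process (drilling temperature, bit friction, drilling speed, cooling-water
# temperature); generate_dataset() below slowly drifts a random subset of these
# signals to inject anomaly windows, flipping Target to 1 once a drift has been
# running for a while.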
def choose():
L = [0,1,2,3]
return sample(L,3)
def fonction_pos_tp(x):
    # drifts temperature_percage up by ~10 over the anomaly window
return x*0.001737 + uniform(-2,2)
def fonction_neg_tp(x):
    # drifts temperature_percage down by ~10 over the anomaly window
return x*(-1*0.001737) + uniform(-2,2)
def fonction_pos_ff(x):
    # drifts frottement_foret up by ~0.4 over the anomaly window
return x*0.00007 + uniform(-0.1,0.1)
def fonction_neg_ff(x):
    # drifts frottement_foret down by ~0.4 over the anomaly window
return x*(-0.00007) + uniform(-0.1,0.1)
def fonction_pos_vp(x):
    # positive drift on vitesse_percage over the anomaly window (~+500 at full ramp)
return x*0.0864 + uniform(-50,50)
def fonction_neg_vp(x):
    # negative drift on vitesse_percage over the anomaly window (~-500 at full ramp)
return x*(-1*0.0864) + uniform(-50,50)
def fonction_pos_ter(x):
    # drifts temperature_eau_refroidissement up by ~5 over the anomaly window
return x*(0.000868) + uniform(-1,1)
def fonction_neg_ter(x):
    # drifts temperature_eau_refroidissement down by ~5 over the anomaly window
return x*(-1*0.000868) + uniform(-1,1)
def generate_dataset(lg_ds):
Temps = [datetime.datetime(2010,1,1,0,0,0)]
Temp_perc = [temperature_percage]
Frot_foret = [frottement_foret]
Vitesse_perc = [vitesse_percage]
Temp_refr = [temperature_eau_refroidissement]
Target = [0]
for i in range(lg_ds):
if ((i % 28800) == 0):
a = i + randint(1, 23040)
if (i == a):
b = choose()
c1 = randint(0, 1)
c2 = randint(0, 1)
c3 = randint(0, 1)
c4 = randint(0, 1)
for j in range(5760):
Temps.append(Temps[-1] + datetime.timedelta(minutes=5))
if ((0 in b) and (c1 == 0)):
Temp_perc.append(temperature_percage + fonction_pos_tp(j))
elif ((0 in b) and (c1 == 1)):
Temp_perc.append(temperature_percage + fonction_neg_tp(j))
else :
Temp_perc.append(temperature_percage + uniform(-2,2))
if ((1 in b) and (c2 == 0)):
Frot_foret.append(frottement_foret + fonction_pos_ff(j))
elif ((1 in b) and (c2 == 1)):
Frot_foret.append(frottement_foret + fonction_neg_ff(j))
else :
Frot_foret.append(frottement_foret + uniform(-0.1,0.1))
if ((2 in b) and (c3 == 0)):
Vitesse_perc.append(vitesse_percage + fonction_pos_vp(j))
elif ((2 in b) and (c3 == 1)):
Vitesse_perc.append(vitesse_percage + fonction_neg_vp(j))
else :
Vitesse_perc.append(vitesse_percage + uniform(-50,50))
if ((3 in b) and (c4 == 0)):
Temp_refr.append(temperature_eau_refroidissement + fonction_pos_ter(j))
elif ((3 in b) and (c4 == 1)):
Temp_refr.append(temperature_eau_refroidissement + fonction_neg_ter(j))
else :
Temp_refr.append(temperature_eau_refroidissement + uniform(-1,1))
if (j > 1000):
Target.append(1)
else :
Target.append(0)
else :
Temps.append(Temps[-1] + datetime.timedelta(minutes=5))
Temp_perc.append(temperature_percage + 2*(np.sin(i/(288))) + uniform(-0.4, 0.4))
Frot_foret.append(frottement_foret + uniform(-0.1,0.1))
Vitesse_perc.append(vitesse_percage + 50*(np.sin(i/(288))) + uniform(-10, 10))
Temp_refr.append(temperature_eau_refroidissement + np.cos(i/(288)) + uniform(-0.2,0.2))
Target.append(0)
    df = pd.DataFrame([Temps, Temp_perc, Frot_foret, Vitesse_perc, Temp_refr, Target])
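    # Hedged completion (assumption): the snippet is truncated at the DataFrame
    # construction above. A plausible finish transposes the row-wise lists into
    # columns, names them, and returns the frame; the column names are illustrative.
    df = df.T
    df.columns = ["timestamp", "temp_percage", "frottement_foret",
                  "vitesse_percage", "temp_refroidissement", "target"]
    return df

# Usage sketch (assumption): build the full simulated dataset and persist it.
# df = generate_dataset(long_dataset)
# df.to_csv("drilling_simulation.csv", index=False)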
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Convert raw PASCAL dataset to TFRecord for object_detection.
Example usage:
python object_detection/dataset_tools/create_pascal_tf_record.py \
--data_dir=/home/user/VOCdevkit \
--year=VOC2012 \
--output_path=/home/user/pascal.record
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hashlib
import io
import logging
import os
from lxml import etree
import PIL.Image
import tensorflow as tf
from object_detection.utils import dataset_util
from object_detection.utils import label_map_util
import contextlib2
from object_detection.dataset_tools import tf_record_creation_util
import numpy as np
import pandas as pd
from numpy.random import RandomState
import cv2
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
flags = tf.app.flags
flags.DEFINE_string('data_dir', '', 'Root directory to raw PASCAL VOC dataset.')
flags.DEFINE_string('set', 'train', 'Convert training set, validation set or '
'merged set.')
flags.DEFINE_string('annotations_dir', 'Annotations',
'(Relative) path to annotations directory.')
flags.DEFINE_string('year', 'VOC2007', 'Desired challenge year.')
flags.DEFINE_string('output_path', '', 'Path to output TFRecord')
flags.DEFINE_string('label_map_path', 'object_detection/data/pascal_label_map.pbtxt',
'Path to label map proto')
flags.DEFINE_boolean('ignore_difficult_instances', False, 'Whether to ignore '
'difficult instances')
FLAGS = flags.FLAGS
SETS = ['train', 'val', 'trainval', 'test']
YEARS = ['VOC2007', 'VOC2012', 'merged']
def dict_to_tf_example(data,
dataset_directory,
label_map_dict,
ignore_difficult_instances=False,
image_subdirectory='JPEGImages'):
"""Convert XML derived dict to tf.Example proto.
Notice that this function normalizes the bounding box coordinates provided
by the raw data.
Args:
data: dict holding PASCAL XML fields for a single image (obtained by
running dataset_util.recursive_parse_xml_to_dict)
dataset_directory: Path to root directory holding PASCAL dataset
label_map_dict: A map from string label names to integers ids.
ignore_difficult_instances: Whether to skip difficult instances in the
dataset (default: False).
image_subdirectory: String specifying subdirectory within the
PASCAL dataset directory holding the actual image data.
Returns:
example: The converted tf.Example.
Raises:
ValueError: if the image pointed to by data['filename'] is not a valid JPEG
"""
img_path = os.path.join(data['folder'], data['filename']+'.jpg')
#os.path.join(data['folder'], image_subdirectory, data['filename'])
full_path = os.path.join(dataset_directory, img_path)
#print("full_path", full_path)
with tf.gfile.GFile(full_path, 'rb') as fid:
encoded_jpg = fid.read()
encoded_jpg_io = io.BytesIO(encoded_jpg)
image = PIL.Image.open(encoded_jpg_io)
if image.format != 'JPEG':
raise ValueError('Image format not JPEG')
key = hashlib.sha256(encoded_jpg).hexdigest()
width = int(data['size']['width'])
height = int(data['size']['height'])
xmin = []
ymin = []
xmax = []
ymax = []
classes = []
classes_text = []
truncated = []
poses = []
difficult_obj = []
if 'object' in data:
for obj in data['object']:
difficult = bool(int(obj['difficult']))
if ignore_difficult_instances and difficult:
continue
difficult_obj.append(int(difficult))
brands[obj['name']]=brands[obj['name']]+1
xmin.append(float(obj['bndbox']['xmin']) / width)
ymin.append(float(obj['bndbox']['ymin']) / height)
xmax.append(float(obj['bndbox']['xmax']) / width)
ymax.append(float(obj['bndbox']['ymax']) / height)
classes_text.append(obj['name'].encode('utf8'))
if (obj['name'] in label_map_dict.keys()):
classes.append(label_map_dict[obj['name']])
else:
print("WARNING",full_path)
truncated.append(int(obj['truncated']))
poses.append(obj['pose'].encode('utf8'))
examples_list_number_classes_text[os.path.splitext(full_path)[0]]=len(classes_text)
examples_list_number_classes[os.path.splitext(full_path)[0]]=len(classes)
logging.info("%s %s %s %s %s %s %s %s %s", xmin, ymin, xmax, ymax, classes_text, classes, poses, data['folder'], data['filename'])
#print(xmin,ymin,xmax,ymax,classes_text,classes,poses,data['folder'], data['filename'])
#print(xmin,ymin,xmax,ymax)
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': dataset_util.int64_feature(height),
'image/width': dataset_util.int64_feature(width),
'image/filename': dataset_util.bytes_feature(
(data['folder'] + '/' + data['filename']).encode('utf8')),
'image/source_id': dataset_util.bytes_feature(
(data['folder'] + '/' + data['filename']).encode('utf8')),
'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')),
'image/encoded': dataset_util.bytes_feature(encoded_jpg),
'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')),
'image/object/bbox/xmin': dataset_util.float_list_feature(xmin),
'image/object/bbox/xmax': dataset_util.float_list_feature(xmax),
'image/object/bbox/ymin': dataset_util.float_list_feature(ymin),
'image/object/bbox/ymax': dataset_util.float_list_feature(ymax),
'image/object/class/text': dataset_util.bytes_list_feature(classes_text),
'image/object/class/label': dataset_util.int64_list_feature(classes),
'image/object/difficult': dataset_util.int64_list_feature(difficult_obj),
'image/object/truncated': dataset_util.int64_list_feature(truncated),
'image/object/view': dataset_util.bytes_list_feature(poses),
}))
if (examples_list_number_classes_text[os.path.splitext(full_path)[0]]!=examples_list_number_classes[os.path.splitext(full_path)[0]]):
print(full_path,example)
if ((data['folder']+'/'+data['filename']=='nike/img000105') or (len(classes_text) ==0)):
#logging.info(example)
#print(full_path,example)
print(full_path,examples_list_number_classes_text[os.path.splitext(full_path)[0]],examples_list_number_classes[os.path.splitext(full_path)[0]])
# extract pre-trained face detector
#face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml')
# load color (BGR) image
img = cv2.imread(full_path)
# convert BGR image to grayscale
#gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
## find faces in image
#faces = face_cascade.detectMultiScale(gray)
# print number of faces detected in the image
#print('Number of faces detected:', len(faces))
## get bounding box for each detected face
#for (x,y,w,h) in (xmin,ymin,xmax,ymax):
# # add bounding box to color image
#cv2.rectangle(img,(int(xmin[0]),int(ymin[0])),(int(xmax[0]),int(ymax[0])),(255,0,0),2)
# coordinates were normalized above, so scale back to pixel space before drawing
cv2.rectangle(img,(int(xmin[0]*width),int(ymin[0]*height)),(int(xmax[0]*width),int(ymax[0]*height)),(255,0,0),2)
# convert BGR image to RGB for plotting
cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
#cv2.imshow("Image",cv_rgb)
# display the image, along with bounding box
#plt.imshow(cv_rgb)
#plt.show()
return example
def main(_):
if FLAGS.set not in SETS:
raise ValueError('set must be in : {}'.format(SETS))
if FLAGS.year not in YEARS:
raise ValueError('year must be in : {}'.format(YEARS))
data_dir = FLAGS.data_dir
#years = ['VOC2007', 'VOC2012']
#if FLAGS.year != 'merged':
# years = [FLAGS.year]
output_train_path = os.path.join(FLAGS.output_path,"pascal3_train.record")
output_val_path = os.path.join(FLAGS.output_path,"pascal3_val.record")
#writer = tf.python_io.TFRecordWriter(FLAGS.output_path)
writer_train = tf.python_io.TFRecordWriter(output_train_path)
writer_val = tf.python_io.TFRecordWriter(output_val_path)
label_map_dict = label_map_util.get_label_map_dict(FLAGS.label_map_path)
brands= {}.fromkeys(label_map_dict.keys(),0)
print('Reading from PASCAL dataset.')
examples_path = os.path.join(data_dir)#(data_dir, year, 'ImageSets', 'Main','aeroplane_' + FLAGS.set + '.txt')
examples_list=[os.path.splitext(os.path.join(root, name))[0]
for root, dirs, files in os.walk(examples_path)
for name in files
if name.endswith(("jpg"))]
print(len(examples_list),examples_list)
indices = list(range(len(examples_list)))
num_training_instances = int(0.8 * len(examples_list))
random_state=1234567890
rs = np.random.RandomState(random_state)
rs.shuffle(indices)
train_indices = indices[:num_training_instances]
val_indices = indices[num_training_instances:]
#print("indices",indices)
# split the actual data
examples_list_train, examples_list_val = list(pd.DataFrame(examples_list).iloc[train_indices].values.flatten()), list(pd.DataFrame(examples_list).iloc[val_indices].values.flatten())
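# --- Illustrative sketch (not part of the original script) ------------------
# The split above shuffles index positions with a fixed RandomState seed and
# takes the first 80% as training examples; the file paths below are
# hypothetical stand-ins for the walked JPEG list.
import numpy as np
import pandas as pd

toy_examples = ["brand_%d/img%06d" % (i, i) for i in range(10)]
positions = list(range(len(toy_examples)))
np.random.RandomState(1234567890).shuffle(positions)
cut = int(0.8 * len(toy_examples))
toy_train = list(pd.DataFrame(toy_examples).iloc[positions[:cut]].values.flatten())
toy_val = list(pd.DataFrame(toy_examples).iloc[positions[cut:]].values.flatten())
assert set(toy_train).isdisjoint(toy_val) and len(toy_train) + len(toy_val) == len(toy_examples)
# -----------------------------------------------------------------------------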
import unittest, os
import pandas as pd
import pandas.util.testing as pdt
import numpy as np
from inferelator_ng import kvs_controller
from inferelator_ng import bbsr_python
from inferelator_ng import bayes_stats
from inferelator_ng import regression
my_dir = os.path.dirname(__file__)
class TestBBSRrunnerPython(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestBBSRrunnerPython, self).__init__(*args, **kwargs)
# Extra behavior: only run if KVSClient can reach the host:
self.kvs = None # dummy value on failure
try:
self.kvs = kvs_controller.KVSController()
except Exception as e:
if str(e) == 'Missing host':
print('Test test_bbsr.py exiting since KVS host is not running')
print('Try rerunning tests with python $LOCALREPO/kvsstcp.py --execcmd "nosetests --nocapture -v"')
self.missing_kvs_host = True
# Mock out Slurm process IDs so that KVS can access this process ID in bbsr_python.py
os.environ['SLURM_PROCID'] = str(0)
os.environ['SLURM_NTASKS'] = str(1)
def get_kvs(self):
result = self.kvs
if result is None:
self.fail("Test requires missing KVS host.")
return result
def setUp(self):
# Check for os.environ['SLURM_NTASKS']
self.rank = 0
self.brd = bbsr_python.BBSR
def run_bbsr(self):
kvs = self.get_kvs()
return self.brd(self.X, self.Y, self.clr, self.priors, kvs=kvs).run()
def set_all_zero_priors(self):
self.priors = pd.DataFrame([[0, 0],[0, 0]], index = ['gene1', 'gene2'], columns = ['gene1', 'gene2'])
def set_all_zero_clr(self):
self.clr = pd.DataFrame([[0, 0],[0, 0]], index = ['gene1', 'gene2'], columns = ['gene1', 'gene2'])
def assert_matrix_is_square(self, size, matrix):
self.assertEqual(matrix.shape, (size, size))
def test_two_genes(self):
self.set_all_zero_priors()
self.set_all_zero_clr()
self.X = pd.DataFrame([0, 0], index = ['gene1', 'gene2'], columns = ['ss'])
self.Y = pd.DataFrame([0, 0], index = ['gene1', 'gene2'], columns = ['ss'])
(betas, resc) = self.run_bbsr()
self.assert_matrix_is_square(2, betas)
self.assert_matrix_is_square(2, resc)
pdt.assert_frame_equal(betas, pd.DataFrame([[0, 0],[0, 0]], index = ['gene1', 'gene2'], columns = ['gene1', 'gene2']).astype(float))
pdt.assert_frame_equal(resc, pd.DataFrame([[0, 0],[0, 0]], index = ['gene1', 'gene2'], columns = ['gene1', 'gene2']).astype(float))
'''
def test_fails_with_one_gene(self):
self.set_all_zero_priors()
self.set_all_zero_clr()
self.X = pd.DataFrame([0], index = ['gene1'], columns = ['ss'])
self.Y = pd.DataFrame([0], index = ['gene1'], columns = ['ss'])
self.assertRaises(CalledProcessError, self.brd.run, self.X, self.Y, self.clr, self.priors)
'''
def test_two_genes_nonzero(self):
self.set_all_zero_priors()
self.set_all_zero_clr()
self.X = pd.DataFrame([1, 2], index = ['gene1', 'gene2'], columns = ['ss'])
self.Y = pd.DataFrame([1, 2], index = ['gene1', 'gene2'], columns = ['ss'])
(betas, resc) = self.run_bbsr()
self.assert_matrix_is_square(2, betas)
self.assert_matrix_is_square(2, resc)
pdt.assert_frame_equal(betas, pd.DataFrame([[0, 0],[0, 0]], index = ['gene1', 'gene2'], columns = ['gene1', 'gene2']).astype(float))
pdt.assert_frame_equal(resc, pd.DataFrame([[0, 0],[0, 0]], index = ['gene1', 'gene2'], columns = ['gene1', 'gene2']).astype(float))
# BBSR fails when there's only one column in the design (or response) matrix
# That seems like unexpected behavior to me. If it is expected, there should be checks for it earlier -<NAME>
'''
@unittest.skip("""
There's some unexpected behavior in bayesianRegression.R: a 2 x 1 matrix is getting transformed into a NaN matrix
ss
gene1 NaN
gene2 NaN
attr(,"scaled:center")
gene1 gene2
1 2
attr(,"scaled:scale")
gene1 gene2
0 0
""")
'''
def test_two_genes_nonzero_clr_nonzero(self):
self.set_all_zero_priors()
self.X = pd.DataFrame([1, 2], index = ['gene1', 'gene2'], columns = ['ss'])
self.Y = pd.DataFrame([1, 2], index = ['gene1', 'gene2'], columns = ['ss'])
self.clr = pd.DataFrame([[.1, .1],[.1, .2]], index = ['gene1', 'gene2'], columns = ['gene1', 'gene2'])
(betas, resc) = self.run_bbsr()
self.assert_matrix_is_square(2, betas)
self.assert_matrix_is_square(2, resc)
pdt.assert_frame_equal(betas, pd.DataFrame([[0, 0],[0, 0]], index = ['gene1', 'gene2'], columns = ['gene1', 'gene2']).astype(float))
pdt.assert_frame_equal(resc, pd.DataFrame([[0, 0],[0, 0]], index = ['gene1', 'gene2'], columns = ['gene1', 'gene2']).astype(float))
def test_two_genes_nonzero_clr_two_conditions_negative_influence(self):
self.set_all_zero_priors()
self.X = pd.DataFrame([[1, 2], [2, 1]], index = ['gene1', 'gene2'], columns = ['ss1', 'ss2'])
self.Y = pd.DataFrame([[1, 2], [2, 1]], index = ['gene1', 'gene2'], columns = ['ss1', 'ss2'])
self.clr = pd.DataFrame([[.1, .1],[.1, .2]], index = ['gene1', 'gene2'], columns = ['gene1', 'gene2'])
(betas, resc) = self.run_bbsr()
self.assert_matrix_is_square(2, betas)
self.assert_matrix_is_square(2, resc)
pdt.assert_frame_equal(betas, pd.DataFrame([[0, -1],[-1, 0]], index = ['gene1', 'gene2'], columns = ['gene1', 'gene2']).astype(float))
pdt.assert_frame_equal(resc, pd.DataFrame([[0, 1],[1, 0]], index = ['gene1', 'gene2'], columns = ['gene1', 'gene2']).astype(float))
def test_two_genes_nonzero_clr_two_conditions_zero_gene1_negative_influence(self):
self.set_all_zero_priors()
self.X = pd.DataFrame([[0, 2], [2, 0]], index = ['gene1', 'gene2'], columns = ['ss1', 'ss2'])
self.Y = pd.DataFrame([[0, 1], [1, 0]], index = ['gene1', 'gene2'], columns = ['ss1', 'ss2'])
self.clr = pd.DataFrame([[.1, .1],[.1, .2]], index = ['gene1', 'gene2'], columns = ['gene1', 'gene2'])
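# --- Illustrative sketch (not part of the original tests) -------------------
# The "negative influence" cases above encode two genes that move in opposite
# directions across the two conditions; a quick check of that anti-correlation
# motivates the expected off-diagonal betas of -1.
import numpy as np
import pandas as pd

X_toy = pd.DataFrame([[1, 2], [2, 1]], index=["gene1", "gene2"], columns=["ss1", "ss2"])
corr = np.corrcoef(X_toy.loc["gene1"], X_toy.loc["gene2"])[0, 1]
assert abs(corr + 1.0) < 1e-9  # perfectly anti-correlated
# -----------------------------------------------------------------------------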
import gcsfs
import pandas as pd
import regex
class Table2Df:
def __init__(self, table_fit, fs):
self.table = table_fit
self.fs = fs
self.df = None
self.data = table_fit.data
self.table_data = self.table.data.drop(self.table.header_indices)
def reconstruct_table(self):
"""
Takes the information stored in the table_fit object and reconstitutes it into
a DataFrame that mirrors how we see the table on the balance sheet.
Arguments:
None
Returns:
new_df: pandas DataFrame of table data from the balance sheet
Raises:
None
"""
headers = self.headers_to_strings(self.table.header_groups)
new_df = pd.DataFrame(columns=headers)
# For each row from our collected data, add the value in the corresponding position
for index, row in self.table_data.iterrows():
new_df.loc[row["line_num"], headers[int(row["column"])]] = row["value"]
return new_df
def headers_to_strings(self, headers):
"""
Converts groups of header indices into a list of strings of all elements
of each header group combined into a string.
Arguments:
headers: List of grouped header indices (the result of
TableFitter.group_header_points())
Returns:
str_headers: List of strings corresponding to the header values
Raises:
None
"""
# Set a value for the first column (which won't have a title)
str_headers = ["Entities"]
# Loop over the header groups
for h in headers:
k = 0
new_string = ""
# Add all strings together
while k < len(h):
new_string+= (self.data.loc[h[k], "value"] + "\n")
k+=1
str_headers.append(new_string)
return str_headers
def get_info_headers(self, years = range(1999,2020)):
"""
Creates a DataFrame of information of column info (meta data). For each column in
our fitted table object, we record the corresponding date and units (currency).
Arguments:
years: List of possible years to search for.
Returns:
header_data: pandas DataFrame of column number with their relevant date and units
as other variables
Raises:
None
"""
# Get a list of header indicies where we see a currency symbol
currency_indexes = [i for i in self.table.header_indices if
len(regex.findall(r"\p{Sc}", self.data.loc[i, "value"]))]
self.unit_headers = currency_indexes
# As above but for where we see a year
date_indexes = []
for i in self.table.header_indices:
contains_year = any([str(y) in self.data.loc[i, "value"] for y in years])
if contains_year:
date_indexes.append(i)
self.date_headers = date_indexes
# Create an empty DataFrame to add information to
header_data = pd.DataFrame(columns=["column", "date", "unit"])
# Add information for each column header to the DataFrame
for i, g in enumerate(self.table.header_groups):
unit_col = [j for j in g if j in self.unit_headers]
date_col = [j for j in g if j in self.date_headers]
if len(unit_col)==1 or len(date_col)==1:
header_data.loc[i, "column"] = i+1
# Work out the unit if we don't have one assigned
if not unit_col:
header_data.loc[i, "date"] = self.data.loc[date_col[0], "value"]
header_data.loc[i, "unit"] = self.get_closest_el(self.data, date_col[0], self.unit_headers)
# As above but for dates
elif not date_col:
header_data.loc[i, "date"] = self.get_closest_el(self.data, unit_col[0], self.date_headers)
header_data.loc[i, "unit"] = self.data.loc[unit_col[0], "value"]
# If we already have both add them to the dataset
else:
header_data.loc[i, "date"] = self.data.loc[date_col[0], "value"]
header_data.loc[i, "unit"] = self.data.loc[unit_col[0], "value"]
return header_data
def get_info_headers_v2(self, years = range(1999,2020)):
"""
Creates a DataFrame of information of column info (meta data). For each column in
our fitted table object, we record the corresponding date and units (currency).
Arguments:
years: List of possible years to search for.
Returns:
header_data: pandas DataFrame of column number with their relevant date and units
as other variables
Raises:
None
"""
self.data_cols = [i+1 for i,g in enumerate(self.table.header_groups) if self.table.notes_row[0] not in g]
data_cols = [i+1 for i,g in enumerate(self.table.header_groups) if self.table.notes_row[0] not in g]
currencies = [self.data.loc[i, "value"] for i in self.table.header_indices if
len(regex.findall(r"\p{Sc}", self.data.loc[i, "value"]))]
currency = max(set(currencies), key=currencies.count)
# As above but for where we see a year
dates = []
for i in self.table.header_indices:
contains_year = any([str(y) in self.data.loc[i, "value"] for y in years])
if contains_year:
dates.append(self.data.loc[i, "value"])
self.dates = dates
if len(data_cols)%len(dates) != 0:
raise(TypeError("Cannot logically fit dates to columns"))
else:
header_dict = {"column": data_cols, "date":[dates[i//(len(data_cols)//len(dates))] for i in range(len(data_cols))],
"unit":[currency]*len(data_cols)}
# Create an empty DataFrame to add information to
header_data = pd.DataFrame.from_dict(header_dict)
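# --- Illustrative sketch (not part of the original class) -------------------
# get_info_headers_v2 pairs each data column with a repeated date and a single
# currency before converting the dict of lists with DataFrame.from_dict; the
# column numbers, dates and currency below are made-up examples.
import pandas as pd

toy_cols = [1, 2, 3, 4]
toy_dates = ["31 December 2019", "31 December 2018"]
toy_currency = "£"
toy_header_dict = {
    "column": toy_cols,
    "date": [toy_dates[i // (len(toy_cols) // len(toy_dates))] for i in range(len(toy_cols))],
    "unit": [toy_currency] * len(toy_cols),
}
toy_header_data = pd.DataFrame.from_dict(toy_header_dict)
# columns 1-2 get the 2019 date, columns 3-4 the 2018 date, all reported in £
# -----------------------------------------------------------------------------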
"""SQL io tests
The SQL tests are broken down in different classes:
- `PandasSQLTest`: base class with common methods for all test classes
- Tests for the public API (only tests with sqlite3)
- `_TestSQLApi` base class
- `TestSQLApi`: test the public API with sqlalchemy engine
- `TestSQLiteFallbackApi`: test the public API with a sqlite DBAPI
connection
- Tests for the different SQL flavors (flavor specific type conversions)
- Tests for the sqlalchemy mode: `_TestSQLAlchemy` is the base class with
common methods, `_TestSQLAlchemyConn` tests the API with a SQLAlchemy
Connection object. The different tested flavors (sqlite3, MySQL,
PostgreSQL) derive from the base class
- Tests for the fallback mode (`TestSQLiteFallback`)
"""
import csv
from datetime import date, datetime, time
from io import StringIO
import sqlite3
import warnings
import numpy as np
import pytest
from pandas.core.dtypes.common import is_datetime64_dtype, is_datetime64tz_dtype
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
Timestamp,
concat,
date_range,
isna,
to_datetime,
to_timedelta,
)
import pandas._testing as tm
import pandas.io.sql as sql
from pandas.io.sql import read_sql_query, read_sql_table
try:
import sqlalchemy
import sqlalchemy.schema
import sqlalchemy.sql.sqltypes as sqltypes
from sqlalchemy.ext import declarative
from sqlalchemy.orm import session as sa_session
SQLALCHEMY_INSTALLED = True
except ImportError:
SQLALCHEMY_INSTALLED = False
SQL_STRINGS = {
"create_iris": {
"sqlite": """CREATE TABLE iris (
"SepalLength" REAL,
"SepalWidth" REAL,
"PetalLength" REAL,
"PetalWidth" REAL,
"Name" TEXT
)""",
"mysql": """CREATE TABLE iris (
`SepalLength` DOUBLE,
`SepalWidth` DOUBLE,
`PetalLength` DOUBLE,
`PetalWidth` DOUBLE,
`Name` VARCHAR(200)
)""",
"postgresql": """CREATE TABLE iris (
"SepalLength" DOUBLE PRECISION,
"SepalWidth" DOUBLE PRECISION,
"PetalLength" DOUBLE PRECISION,
"PetalWidth" DOUBLE PRECISION,
"Name" VARCHAR(200)
)""",
},
"insert_iris": {
"sqlite": """INSERT INTO iris VALUES(?, ?, ?, ?, ?)""",
"mysql": """INSERT INTO iris VALUES(%s, %s, %s, %s, "%s");""",
"postgresql": """INSERT INTO iris VALUES(%s, %s, %s, %s, %s);""",
},
"create_test_types": {
"sqlite": """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TEXT,
"IntDateCol" INTEGER,
"IntDateOnlyCol" INTEGER,
"FloatCol" REAL,
"IntCol" INTEGER,
"BoolCol" INTEGER,
"IntColWithNull" INTEGER,
"BoolColWithNull" INTEGER
)""",
"mysql": """CREATE TABLE types_test_data (
`TextCol` TEXT,
`DateCol` DATETIME,
`IntDateCol` INTEGER,
`IntDateOnlyCol` INTEGER,
`FloatCol` DOUBLE,
`IntCol` INTEGER,
`BoolCol` BOOLEAN,
`IntColWithNull` INTEGER,
`BoolColWithNull` BOOLEAN
)""",
"postgresql": """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TIMESTAMP,
"DateColWithTz" TIMESTAMP WITH TIME ZONE,
"IntDateCol" INTEGER,
"IntDateOnlyCol" INTEGER,
"FloatCol" DOUBLE PRECISION,
"IntCol" INTEGER,
"BoolCol" BOOLEAN,
"IntColWithNull" INTEGER,
"BoolColWithNull" BOOLEAN
)""",
},
"insert_test_types": {
"sqlite": {
"query": """
INSERT INTO types_test_data
VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)
""",
"fields": (
"TextCol",
"DateCol",
"IntDateCol",
"IntDateOnlyCol",
"FloatCol",
"IntCol",
"BoolCol",
"IntColWithNull",
"BoolColWithNull",
),
},
"mysql": {
"query": """
INSERT INTO types_test_data
VALUES("%s", %s, %s, %s, %s, %s, %s, %s, %s)
""",
"fields": (
"TextCol",
"DateCol",
"IntDateCol",
"IntDateOnlyCol",
"FloatCol",
"IntCol",
"BoolCol",
"IntColWithNull",
"BoolColWithNull",
),
},
"postgresql": {
"query": """
INSERT INTO types_test_data
VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
""",
"fields": (
"TextCol",
"DateCol",
"DateColWithTz",
"IntDateCol",
"IntDateOnlyCol",
"FloatCol",
"IntCol",
"BoolCol",
"IntColWithNull",
"BoolColWithNull",
),
},
},
"read_parameters": {
"sqlite": "SELECT * FROM iris WHERE Name=? AND SepalLength=?",
"mysql": 'SELECT * FROM iris WHERE `Name`="%s" AND `SepalLength`=%s',
"postgresql": 'SELECT * FROM iris WHERE "Name"=%s AND "SepalLength"=%s',
},
"read_named_parameters": {
"sqlite": """
SELECT * FROM iris WHERE Name=:name AND SepalLength=:length
""",
"mysql": """
SELECT * FROM iris WHERE
`Name`="%(name)s" AND `SepalLength`=%(length)s
""",
"postgresql": """
SELECT * FROM iris WHERE
"Name"=%(name)s AND "SepalLength"=%(length)s
""",
},
"create_view": {
"sqlite": """
CREATE VIEW iris_view AS
SELECT * FROM iris
"""
},
}
class MixInBase:
def teardown_method(self, method):
# if setup fails, there may not be a connection to close.
if hasattr(self, "conn"):
for tbl in self._get_all_tables():
self.drop_table(tbl)
self._close_conn()
class MySQLMixIn(MixInBase):
def drop_table(self, table_name):
cur = self.conn.cursor()
cur.execute(f"DROP TABLE IF EXISTS {sql._get_valid_mysql_name(table_name)}")
self.conn.commit()
def _get_all_tables(self):
cur = self.conn.cursor()
cur.execute("SHOW TABLES")
return [table[0] for table in cur.fetchall()]
def _close_conn(self):
from pymysql.err import Error
try:
self.conn.close()
except Error:
pass
class SQLiteMixIn(MixInBase):
def drop_table(self, table_name):
self.conn.execute(
f"DROP TABLE IF EXISTS {sql._get_valid_sqlite_name(table_name)}"
)
self.conn.commit()
def _get_all_tables(self):
c = self.conn.execute("SELECT name FROM sqlite_master WHERE type='table'")
return [table[0] for table in c.fetchall()]
def _close_conn(self):
self.conn.close()
class SQLAlchemyMixIn(MixInBase):
def drop_table(self, table_name):
sql.SQLDatabase(self.conn).drop_table(table_name)
def _get_all_tables(self):
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
table_list = meta.tables.keys()
return table_list
def _close_conn(self):
pass
class PandasSQLTest:
"""
Base class with common private methods for SQLAlchemy and fallback cases.
"""
def _get_exec(self):
if hasattr(self.conn, "execute"):
return self.conn
else:
return self.conn.cursor()
@pytest.fixture(params=[("data", "iris.csv")])
def load_iris_data(self, datapath, request):
import io
iris_csv_file = datapath(*request.param)
if not hasattr(self, "conn"):
self.setup_connect()
self.drop_table("iris")
self._get_exec().execute(SQL_STRINGS["create_iris"][self.flavor])
with io.open(iris_csv_file, mode="r", newline=None) as iris_csv:
r = csv.reader(iris_csv)
next(r) # skip header row
ins = SQL_STRINGS["insert_iris"][self.flavor]
for row in r:
self._get_exec().execute(ins, row)
def _load_iris_view(self):
self.drop_table("iris_view")
self._get_exec().execute(SQL_STRINGS["create_view"][self.flavor])
def _check_iris_loaded_frame(self, iris_frame):
pytype = iris_frame.dtypes[0].type
row = iris_frame.iloc[0]
assert issubclass(pytype, np.floating)
tm.equalContents(row.values, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
def _load_test1_data(self):
columns = ["index", "A", "B", "C", "D"]
data = [
(
"2000-01-03 00:00:00",
0.980268513777,
3.68573087906,
-0.364216805298,
-1.15973806169,
),
(
"2000-01-04 00:00:00",
1.04791624281,
-0.0412318367011,
-0.16181208307,
0.212549316967,
),
(
"2000-01-05 00:00:00",
0.498580885705,
0.731167677815,
-0.537677223318,
1.34627041952,
),
(
"2000-01-06 00:00:00",
1.12020151869,
1.56762092543,
0.00364077397681,
0.67525259227,
),
]
self.test_frame1 = DataFrame(data, columns=columns)
def _load_test2_data(self):
df = DataFrame(
dict(
A=[4, 1, 3, 6],
B=["asd", "gsq", "ylt", "jkl"],
C=[1.1, 3.1, 6.9, 5.3],
D=[False, True, True, False],
E=["1990-11-22", "1991-10-26", "1993-11-26", "1995-12-12"],
)
)
df["E"] = to_datetime(df["E"])
self.test_frame2 = df
def _load_test3_data(self):
columns = ["index", "A", "B"]
data = [
("2000-01-03 00:00:00", 2 ** 31 - 1, -1.987670),
("2000-01-04 00:00:00", -29, -0.0412318367011),
("2000-01-05 00:00:00", 20000, 0.731167677815),
("2000-01-06 00:00:00", -290867, 1.56762092543),
]
self.test_frame3 = DataFrame(data, columns=columns)
def _load_raw_sql(self):
self.drop_table("types_test_data")
self._get_exec().execute(SQL_STRINGS["create_test_types"][self.flavor])
ins = SQL_STRINGS["insert_test_types"][self.flavor]
data = [
{
"TextCol": "first",
"DateCol": "2000-01-03 00:00:00",
"DateColWithTz": "2000-01-01 00:00:00-08:00",
"IntDateCol": 535852800,
"IntDateOnlyCol": 20101010,
"FloatCol": 10.10,
"IntCol": 1,
"BoolCol": False,
"IntColWithNull": 1,
"BoolColWithNull": False,
},
{
"TextCol": "first",
"DateCol": "2000-01-04 00:00:00",
"DateColWithTz": "2000-06-01 00:00:00-07:00",
"IntDateCol": 1356998400,
"IntDateOnlyCol": 20101212,
"FloatCol": 10.10,
"IntCol": 1,
"BoolCol": False,
"IntColWithNull": None,
"BoolColWithNull": None,
},
]
for d in data:
self._get_exec().execute(
ins["query"], [d[field] for field in ins["fields"]]
)
def _count_rows(self, table_name):
result = (
self._get_exec()
.execute(f"SELECT count(*) AS count_1 FROM {table_name}")
.fetchone()
)
return result[0]
def _read_sql_iris(self):
iris_frame = self.pandasSQL.read_query("SELECT * FROM iris")
self._check_iris_loaded_frame(iris_frame)
def _read_sql_iris_parameter(self):
query = SQL_STRINGS["read_parameters"][self.flavor]
params = ["Iris-setosa", 5.1]
iris_frame = self.pandasSQL.read_query(query, params=params)
self._check_iris_loaded_frame(iris_frame)
def _read_sql_iris_named_parameter(self):
query = SQL_STRINGS["read_named_parameters"][self.flavor]
params = {"name": "Iris-setosa", "length": 5.1}
iris_frame = self.pandasSQL.read_query(query, params=params)
self._check_iris_loaded_frame(iris_frame)
def _to_sql(self, method=None):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", method=method)
assert self.pandasSQL.has_table("test_frame1")
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
# Nuke table
self.drop_table("test_frame1")
def _to_sql_empty(self):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1.iloc[:0], "test_frame1")
def _to_sql_fail(self):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
assert self.pandasSQL.has_table("test_frame1")
msg = "Table 'test_frame1' already exists"
with pytest.raises(ValueError, match=msg):
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
self.drop_table("test_frame1")
def _to_sql_replace(self):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
# Add to table again
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="replace")
assert self.pandasSQL.has_table("test_frame1")
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
self.drop_table("test_frame1")
def _to_sql_append(self):
# Nuke table just in case
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
# Add to table again
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="append")
assert self.pandasSQL.has_table("test_frame1")
num_entries = 2 * len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
self.drop_table("test_frame1")
def _to_sql_method_callable(self):
check = [] # used to double check function below is really being used
def sample(pd_table, conn, keys, data_iter):
check.append(1)
data = [dict(zip(keys, row)) for row in data_iter]
conn.execute(pd_table.table.insert(), data)
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", method=sample)
assert self.pandasSQL.has_table("test_frame1")
assert check == [1]
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
# Nuke table
self.drop_table("test_frame1")
def _roundtrip(self):
self.drop_table("test_frame_roundtrip")
self.pandasSQL.to_sql(self.test_frame1, "test_frame_roundtrip")
result = self.pandasSQL.read_query("SELECT * FROM test_frame_roundtrip")
result.set_index("level_0", inplace=True)
# result.index.astype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def _execute_sql(self):
# drop_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = self.pandasSQL.execute("SELECT * FROM iris")
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
def _to_sql_save_index(self):
df = DataFrame.from_records(
[(1, 2.1, "line1"), (2, 1.5, "line2")], columns=["A", "B", "C"], index=["A"]
)
self.pandasSQL.to_sql(df, "test_to_sql_saves_index")
ix_cols = self._get_index_columns("test_to_sql_saves_index")
assert ix_cols == [["A"]]
def _transaction_test(self):
with self.pandasSQL.run_transaction() as trans:
trans.execute("CREATE TABLE test_trans (A INT, B TEXT)")
class DummyException(Exception):
pass
# Make sure when transaction is rolled back, no rows get inserted
ins_sql = "INSERT INTO test_trans (A,B) VALUES (1, 'blah')"
try:
with self.pandasSQL.run_transaction() as trans:
trans.execute(ins_sql)
raise DummyException("error")
except DummyException:
# ignore raised exception
pass
res = self.pandasSQL.read_query("SELECT * FROM test_trans")
assert len(res) == 0
# Make sure when transaction is committed, rows do get inserted
with self.pandasSQL.run_transaction() as trans:
trans.execute(ins_sql)
res2 = self.pandasSQL.read_query("SELECT * FROM test_trans")
assert len(res2) == 1
# -----------------------------------------------------------------------------
# -- Testing the public API
class _TestSQLApi(PandasSQLTest):
"""
Base class to test the public API.
From this two classes are derived to run these tests for both the
sqlalchemy mode (`TestSQLApi`) and the fallback mode
(`TestSQLiteFallbackApi`). These tests are run with sqlite3. Specific
tests for the different sql flavours are included in `_TestSQLAlchemy`.
Notes:
flavor can always be passed even in SQLAlchemy mode,
should be correctly ignored.
we don't use drop_table because that isn't part of the public api
"""
flavor = "sqlite"
mode: str
def setup_connect(self):
self.conn = self.connect()
@pytest.fixture(autouse=True)
def setup_method(self, load_iris_data):
self.load_test_data_and_sql()
def load_test_data_and_sql(self):
self._load_iris_view()
self._load_test1_data()
self._load_test2_data()
self._load_test3_data()
self._load_raw_sql()
def test_read_sql_iris(self):
iris_frame = sql.read_sql_query("SELECT * FROM iris", self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_read_sql_view(self):
iris_frame = sql.read_sql_query("SELECT * FROM iris_view", self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_to_sql(self):
sql.to_sql(self.test_frame1, "test_frame1", self.conn)
assert sql.has_table("test_frame1", self.conn)
def test_to_sql_fail(self):
sql.to_sql(self.test_frame1, "test_frame2", self.conn, if_exists="fail")
assert sql.has_table("test_frame2", self.conn)
msg = "Table 'test_frame2' already exists"
with pytest.raises(ValueError, match=msg):
sql.to_sql(self.test_frame1, "test_frame2", self.conn, if_exists="fail")
def test_to_sql_replace(self):
sql.to_sql(self.test_frame1, "test_frame3", self.conn, if_exists="fail")
# Add to table again
sql.to_sql(self.test_frame1, "test_frame3", self.conn, if_exists="replace")
assert sql.has_table("test_frame3", self.conn)
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame3")
assert num_rows == num_entries
def test_to_sql_append(self):
sql.to_sql(self.test_frame1, "test_frame4", self.conn, if_exists="fail")
# Add to table again
sql.to_sql(self.test_frame1, "test_frame4", self.conn, if_exists="append")
assert sql.has_table("test_frame4", self.conn)
num_entries = 2 * len(self.test_frame1)
num_rows = self._count_rows("test_frame4")
assert num_rows == num_entries
def test_to_sql_type_mapping(self):
sql.to_sql(self.test_frame3, "test_frame5", self.conn, index=False)
result = sql.read_sql("SELECT * FROM test_frame5", self.conn)
tm.assert_frame_equal(self.test_frame3, result)
def test_to_sql_series(self):
s = Series(np.arange(5, dtype="int64"), name="series")
sql.to_sql(s, "test_series", self.conn, index=False)
s2 = sql.read_sql_query("SELECT * FROM test_series", self.conn)
tm.assert_frame_equal(s.to_frame(), s2)
def test_roundtrip(self):
sql.to_sql(self.test_frame1, "test_frame_roundtrip", con=self.conn)
result = sql.read_sql_query("SELECT * FROM test_frame_roundtrip", con=self.conn)
# HACK!
result.index = self.test_frame1.index
result.set_index("level_0", inplace=True)
result.index.astype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def test_roundtrip_chunksize(self):
sql.to_sql(
self.test_frame1,
"test_frame_roundtrip",
con=self.conn,
index=False,
chunksize=2,
)
result = sql.read_sql_query("SELECT * FROM test_frame_roundtrip", con=self.conn)
tm.assert_frame_equal(result, self.test_frame1)
def test_execute_sql(self):
# drop_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = sql.execute("SELECT * FROM iris", con=self.conn)
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
def test_date_parsing(self):
# Test date parsing in read_sql
# No Parsing
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn)
assert not issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_query(
"SELECT * FROM types_test_data", self.conn, parse_dates=["DateCol"]
)
assert issubclass(df.DateCol.dtype.type, np.datetime64)
assert df.DateCol.tolist() == [
pd.Timestamp(2000, 1, 3, 0, 0, 0),
pd.Timestamp(2000, 1, 4, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data",
self.conn,
parse_dates={"DateCol": "%Y-%m-%d %H:%M:%S"},
)
assert issubclass(df.DateCol.dtype.type, np.datetime64)
assert df.DateCol.tolist() == [
pd.Timestamp(2000, 1, 3, 0, 0, 0),
pd.Timestamp(2000, 1, 4, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data", self.conn, parse_dates=["IntDateCol"]
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
assert df.IntDateCol.tolist() == [
pd.Timestamp(1986, 12, 25, 0, 0, 0),
pd.Timestamp(2013, 1, 1, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data", self.conn, parse_dates={"IntDateCol": "s"}
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
assert df.IntDateCol.tolist() == [
pd.Timestamp(1986, 12, 25, 0, 0, 0),
pd.Timestamp(2013, 1, 1, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data",
self.conn,
parse_dates={"IntDateOnlyCol": "%Y%m%d"},
)
assert issubclass(df.IntDateOnlyCol.dtype.type, np.datetime64)
assert df.IntDateOnlyCol.tolist() == [
pd.Timestamp("2010-10-10"),
pd.Timestamp("2010-12-12"),
]
def test_date_and_index(self):
# Test case where same column appears in parse_date and index_col
df = sql.read_sql_query(
"SELECT * FROM types_test_data",
self.conn,
index_col="DateCol",
parse_dates=["DateCol", "IntDateCol"],
)
assert issubclass(df.index.dtype.type, np.datetime64)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
def test_timedelta(self):
# see #6921
df = to_timedelta(Series(["00:00:01", "00:00:03"], name="foo")).to_frame()
with tm.assert_produces_warning(UserWarning):
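# --- Illustrative sketch (not part of the pandas test suite) ----------------
# The fixtures above exercise to_sql/read_sql_query round trips; the same
# pattern, in miniature, against an in-memory sqlite3 connection:
import sqlite3
import pandas as pd

demo_conn = sqlite3.connect(":memory:")
demo_frame = pd.DataFrame({"A": [1, 2, 3], "B": ["x", "y", "z"]})
demo_frame.to_sql("demo", demo_conn, index=False)
demo_roundtrip = pd.read_sql_query("SELECT * FROM demo", demo_conn)
assert demo_roundtrip.equals(demo_frame)
demo_conn.close()
# -----------------------------------------------------------------------------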
"""
DataFrames that are essentially n-by-m matrices.
"""
from __future__ import annotations
import abc
from copy import deepcopy
from functools import partial
from inspect import cleandoc
from typing import Sequence, Set, Tuple, Union
import numpy as np
import pandas as pd
from numpy.random import RandomState
from typeddfs.base_dfs import BaseDf
from typeddfs.df_errors import InvalidDfError, RowColumnMismatchError, VerificationFailedError
from typeddfs.df_typing import FINAL_DF_TYPING, DfTyping
from typeddfs.typed_dfs import TypedDf
class LongFormMatrixDf(TypedDf):
"""
A long-form matrix with columns "row", "column", and "value".
"""
@classmethod
def get_typing(cls) -> DfTyping:
return DfTyping(_required_columns=["row", "column", "value"])
class _MatrixDf(BaseDf, metaclass=abc.ABCMeta):
@classmethod
def convert(cls, df: pd.DataFrame) -> __qualname__:
if not isinstance(df, pd.DataFrame):
raise TypeError(f"Can't convert {type(df)} to {cls.__name__}")
# first always reset the index so we can manage what's in the index vs columns
# index_names() will return [] if no named indices are found
df.__class__ = cls
t = cls.get_typing()
# df = df.vanilla_reset()
# df = df.set_index(t.required_index_names[0])
if df.index.names == [None] and "row" in df.columns:
df = df.set_index("row")
df.columns.name = "column"
df.index.name = "row"
if t.value_dtype is not None:
df = df.astype(t.value_dtype)
df.index = df.index.astype(str)
df.columns = df.columns.astype(str)
# now change the class
df.__class__ = cls
# noinspection PyProtectedMember
cls._check(df)
return df
@classmethod
def _check(cls, df) -> None:
t = cls.get_typing()
# TODO: Why doesn't .dtype work?
if [str(c) for c in df.index.names] != list(df.index.names):
raise InvalidDfError("Some index names are non-str")
if [str(c) for c in df.columns] != df.columns.tolist():
raise InvalidDfError("Some columns are non-str")
for req in t.verifications:
value = req(df)
if value is not None and value is not True:
raise VerificationFailedError(str(value))
def is_symmetric(self) -> bool:
"""
Returns True if the matrix is fully symmetric with exact equality.
"""
return self.rows == self.cols and np.array_equal(self.values, self.T.values)
def sub_matrix(self, rows: Set[str], cols: Set[str]) -> __qualname__:
"""
Returns a matrix containing only these labels.
"""
return self.__class__(self.loc[rows][cols])
def long_form(self) -> LongFormMatrixDf:
"""
Melts into a long-form DataFrame with columns "row", "column", and "value".
Consider calling ``triangle`` first if the matrix is (always) symmetric.
"""
# TODO: melt wasn't working
df = []
for r, row in enumerate(self.rows):
for c, col in enumerate(self.cols):
df.append(pd.Series(dict(row=row, column=col, value=self.iat[r, c])))
return LongFormMatrixDf.convert(pd.DataFrame(df))
def triangle(self, upper: bool = False, strict: bool = False) -> __qualname__:
"""
NaNs out the upper (or lower) triangle, returning a copy.
Arguments:
upper: Keep the upper triangular matrix instead of the lower
strict: Discard the diagonal (set it to NaN)
"""
fn = np.triu if upper else np.tril
fn = partial(fn, k=1) if strict else fn
return self.__class__(self.where(fn(np.ones(self.shape)).astype(bool)))
def sort_alphabetical(self) -> __qualname__:
"""
Sorts by the rows and columns alphabetically.
"""
df = self.sort_natural_index()
df = df.transpose().sort_natural_index()
df = df.transpose()
return df
def shuffle(self, rand: Union[None, int, RandomState] = None) -> __qualname__:
"""
Returns a copy with every value mapped to a new location.
Destroys the correct links between labels and values.
Useful for permutation tests.
"""
cp = deepcopy(self.flatten())
if rand is None:
rand = np.random.RandomState()
elif isinstance(rand, int):
rand = np.random.RandomState(seed=rand)
rand.shuffle(cp)
values = cp.reshape((len(self.rows), len(self.columns)))
return self.__class__(values, index=self.rows, columns=self.columns)
def diagonals(self) -> np.array:
"""
Returns an array of the diagonal elements.
"""
return pd.Series(np.diag(self), index=[self.index, self.columns]).values
def flatten(self) -> np.array:
"""
Flattens the values into a 1-d array.
"""
return self.values.flatten()
@property
def dim_str(self) -> str:
"""
Returns a simple string of n_rows by n_columns.
E.g.: ``15 × 15``.
"""
return f"{len(self.rows)} × {len(self.columns)}"
@property
def dims(self) -> Tuple[int, int]:
"""
Returns (n_rows, n_columns).
"""
return len(self.rows), len(self.columns)
@property
def rows(self) -> Sequence[str]:
"""
Returns the row labels.
"""
return self.index.tolist()
@property
def cols(self) -> Sequence[str]:
"""
Returns the column labels.
"""
return self.columns.tolist()
def _repr_html_(self) -> str:
cls = self.__class__
mark = "✅" if self.__class__.is_valid(self) else "❌"
return cleandoc(
f"""
<strong>{cls.name}: {self.dim} {mark}</strong>
{pd.DataFrame._repr_html_(self)}
"""
)
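# --- Illustrative sketch (not part of typeddfs) ------------------------------
# long_form() above walks every cell to build row/column/value records; plain
# pandas produces the same layout with stack + reset_index:
import pandas as pd

m = pd.DataFrame([[1.0, 0.5], [0.5, 1.0]], index=["a", "b"], columns=["a", "b"])
m.index.name, m.columns.name = "row", "column"
long_form = m.stack().rename("value").reset_index()
# long_form rows: (a, a, 1.0), (a, b, 0.5), (b, a, 0.5), (b, b, 1.0)
# -----------------------------------------------------------------------------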
# -*- coding: utf-8 -*-
"""
Utility functions.
"""
import requests
from re import search, IGNORECASE
import os
import pandas as pd
from xmltodict import parse
from multiprocessing.pool import ThreadPool
from time import sleep
import itertools
###############################################
### Parameters
mission_product_dict = {
'gpm': {
'base_url': 'https://gpm1.gesdisc.eosdis.nasa.gov:443',
'process_level': 'GPM_L3',
'version': 6,
'products': {
'3IMERGHHE': '{mission}_{product}.{version:02}/{year}/{dayofyear:03}/3B-HHR-E.MS.MRG.3IMERG.{date}-S{time_start}-E{time_end}.{minutes}.V{version:02}B.HDF5',
'3IMERGHHL': '{mission}_{product}.{version:02}/{year}/{dayofyear:03}/3B-HHR-L.MS.MRG.3IMERG.{date}-S{time_start}-E{time_end}.{minutes}.V{version:02}B.HDF5',
'3IMERGHH': '{mission}_{product}.{version:02}/{year}/{dayofyear:03}/3B-HHR.MS.MRG.3IMERG.{date}-S{time_start}-E{time_end}.{minutes}.V{version:02}B.HDF5'
}
}
}
master_datasets = {'3IMERGHHE': ['precipitationQualityIndex', 'IRkalmanFilterWeight', 'precipitationCal', 'HQprecipitation', 'probabilityLiquidPrecipitation', 'randomError', 'IRprecipitation'],
'3IMERGHHL': ['precipitationQualityIndex', 'IRkalmanFilterWeight', 'precipitationCal', 'HQprecipitation', 'probabilityLiquidPrecipitation', 'randomError', 'IRprecipitation'],
'3IMERGHH': ['precipitationQualityIndex', 'IRkalmanFilterWeight', 'precipitationCal', 'HQprecipitation', 'probabilityLiquidPrecipitation', 'randomError', 'IRprecipitation']}
###############################################
### Functions
def parse_dates(date, url):
"""
"""
counter = 4
while counter > 0:
date_xml = requests.get(url + '/catalog.xml')
if date_xml.status_code == 200:
break
else:
print('Retrying in 3 seconds...')
counter = counter - 1
sleep(3)
date_lst = parse(date_xml.content)['thredds:catalog']['thredds:dataset']['thredds:dataset']
if not isinstance(date_lst, list):
date_lst = [date_lst]
date_lst = [d for d in date_lst if not '.xml' in d['@name']]
lst1 = [[date, d['@name'].split('-S')[1][:6], d['@name'].split('0-E')[1][:6], d['@name'], d['@ID'], int(d['thredds:dataSize']['#text']), d['thredds:date']['#text']] for d in date_lst]
return lst1
def parse_nasa_catalog(mission, product, version, from_date=None, to_date=None, min_max=False):
"""
Function to parse the NASA Hyrax dap server via the catalog xml.
Parameters
----------
mission : str
The mission to parse (a key of mission_product_dict).
product : str
The product to parse.
version : int
The product version.
from_date : str or None
The start date to query.
to_date : str or None
The end date to query.
min_max : bool
Should only the min and max dates of the product and version be returned?
Returns
-------
DataFrame
indexed by mission and product
Notes
-----
I wish their server was faster, but if you try to query too many dates then it might take a while.
"""
## mission/product parse
base_url = mission_product_dict[mission]['base_url']
mis_url = '/'.join([base_url, 'opendap/hyrax', mission_product_dict[mission]['process_level']])
prod_xml = requests.get(mis_url + '/catalog.xml')
prod_lst = parse(prod_xml.content)['thredds:catalog']['thredds:dataset']['thredds:catalogRef']
prod1 = [p for p in prod_lst if (product in p['@name']) & (str(version) in p['@name'])]
if not prod1:
raise ValueError('No combination of product and version in specified mission')
## Parse available years
years_url = '/'.join([mis_url, prod1[0]['@name']])
years_xml = requests.get(years_url + '/catalog.xml')
years_lst = parse(years_xml.content)['thredds:catalog']['thredds:dataset']['thredds:catalogRef']
if isinstance(years_lst, list):
years_dict = {int(y['@name']): y for y in years_lst}
else:
years_dict = {int(years_lst['@name']): years_lst}
## Parse available months/days of the year
big_lst = []
for y in years_dict:
my_url = '/'.join([years_url, str(y)])
my_xml = requests.get(my_url + '/catalog.xml')
my_lst = parse(my_xml.content)['thredds:catalog']['thredds:dataset']['thredds:catalogRef']
if not isinstance(my_lst, list):
my_lst = [my_lst]
big_lst.extend([[y, int(d['@name']), base_url + d['@ID']] for d in my_lst])
my_df = pd.DataFrame(big_lst, columns=['year', 'dayofyear', 'url'])
my_df['date'] = pd.to_datetime(my_df.year.astype(str)) + pd.to_timedelta(my_df.dayofyear - 1, unit='D')
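# --- Illustrative sketch (not part of the original module) ------------------
# The line above converts a (year, day-of-year) pair into a timestamp by adding
# dayofyear - 1 days to January 1st of that year:
import pandas as pd

toy = pd.DataFrame({"year": [2020, 2020, 2021], "dayofyear": [1, 60, 365]})
toy["date"] = pd.to_datetime(toy.year.astype(str)) + pd.to_timedelta(toy.dayofyear - 1, unit="D")
# -> 2020-01-01, 2020-02-29 (leap year) and 2021-12-31
# -----------------------------------------------------------------------------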
import pytest
import numpy as np
import pandas
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
import matplotlib
import modin.pandas as pd
from modin.pandas.utils import to_pandas
from numpy.testing import assert_array_equal
from .utils import (
random_state,
RAND_LOW,
RAND_HIGH,
df_equals,
df_is_empty,
arg_keys,
name_contains,
test_data_values,
test_data_keys,
test_data_with_duplicates_values,
test_data_with_duplicates_keys,
numeric_dfs,
no_numeric_dfs,
test_func_keys,
test_func_values,
query_func_keys,
query_func_values,
agg_func_keys,
agg_func_values,
numeric_agg_funcs,
quantiles_keys,
quantiles_values,
indices_keys,
indices_values,
axis_keys,
axis_values,
bool_arg_keys,
bool_arg_values,
int_arg_keys,
int_arg_values,
)
# TODO remove once modin-project/modin#469 is resolved
agg_func_keys.remove("str")
agg_func_values.remove(str)
pd.DEFAULT_NPARTITIONS = 4
# Force matplotlib to not use any Xwindows backend.
matplotlib.use("Agg")
class TestDFPartOne:
# Test inter df math functions
def inter_df_math_helper(self, modin_df, pandas_df, op):
# Test dataframe to datframe
try:
pandas_result = getattr(pandas_df, op)(pandas_df)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df)
else:
modin_result = getattr(modin_df, op)(modin_df)
df_equals(modin_result, pandas_result)
# Test dataframe to int
try:
pandas_result = getattr(pandas_df, op)(4)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4)
else:
modin_result = getattr(modin_df, op)(4)
df_equals(modin_result, pandas_result)
# Test dataframe to float
try:
pandas_result = getattr(pandas_df, op)(4.0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4.0)
else:
modin_result = getattr(modin_df, op)(4.0)
df_equals(modin_result, pandas_result)
# Test transposed dataframes to float
try:
pandas_result = getattr(pandas_df.T, op)(4.0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df.T, op)(4.0)
else:
modin_result = getattr(modin_df.T, op)(4.0)
df_equals(modin_result, pandas_result)
frame_data = {
"{}_other".format(modin_df.columns[0]): [0, 2],
modin_df.columns[0]: [0, 19],
modin_df.columns[1]: [1, 1],
}
modin_df2 = pd.DataFrame(frame_data)
pandas_df2 = pandas.DataFrame(frame_data)
# Test dataframe to different dataframe shape
try:
pandas_result = getattr(pandas_df, op)(pandas_df2)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df2)
else:
modin_result = getattr(modin_df, op)(modin_df2)
df_equals(modin_result, pandas_result)
# Test dataframe to list
list_test = random_state.randint(RAND_LOW, RAND_HIGH, size=(modin_df.shape[1]))
try:
pandas_result = getattr(pandas_df, op)(list_test, axis=1)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(list_test, axis=1)
else:
modin_result = getattr(modin_df, op)(list_test, axis=1)
df_equals(modin_result, pandas_result)
# Test dataframe to series
series_test_modin = modin_df[modin_df.columns[0]]
series_test_pandas = pandas_df[pandas_df.columns[0]]
try:
pandas_result = getattr(pandas_df, op)(series_test_pandas, axis=0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(series_test_modin, axis=0)
else:
modin_result = getattr(modin_df, op)(series_test_modin, axis=0)
df_equals(modin_result, pandas_result)
# Test dataframe to series with different index
series_test_modin = modin_df[modin_df.columns[0]].reset_index(drop=True)
series_test_pandas = pandas_df[pandas_df.columns[0]].reset_index(drop=True)
try:
pandas_result = getattr(pandas_df, op)(series_test_pandas, axis=0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(series_test_modin, axis=0)
else:
modin_result = getattr(modin_df, op)(series_test_modin, axis=0)
df_equals(modin_result, pandas_result)
# Level test
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in modin_df.index]
)
modin_df_multi_level = modin_df.copy()
modin_df_multi_level.index = new_idx
# Defaults to pandas
with pytest.warns(UserWarning):
# Operation against self for sanity check
getattr(modin_df_multi_level, op)(modin_df_multi_level, axis=0, level=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "add")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_div(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "div")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_divide(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "divide")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_floordiv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "floordiv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_mod(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "mod")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_mul(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "mul")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_multiply(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "multiply")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_pow(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# TODO: Revert to others once we have an efficient way of preprocessing for positive
# values
try:
pandas_df = pandas_df.abs()
except Exception:
pass
else:
modin_df = modin_df.abs()
self.inter_df_math_helper(modin_df, pandas_df, "pow")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_sub(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "sub")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_subtract(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "subtract")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_truediv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "truediv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___div__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__div__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___add__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__add__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___radd__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__radd__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___mul__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__mul__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rmul__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rmul__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___pow__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__pow__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rpow__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rpow__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___sub__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__sub__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___floordiv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__floordiv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rfloordiv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rfloordiv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___truediv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__truediv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rtruediv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rtruediv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___mod__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__mod__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rmod__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rmod__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rdiv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rdiv__")
# END test inter df math functions
# Test comparison of inter operation functions
def comparison_inter_ops_helper(self, modin_df, pandas_df, op):
try:
pandas_result = getattr(pandas_df, op)(pandas_df)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df)
else:
modin_result = getattr(modin_df, op)(modin_df)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)(4)
except TypeError:
with pytest.raises(TypeError):
getattr(modin_df, op)(4)
else:
modin_result = getattr(modin_df, op)(4)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)(4.0)
except TypeError:
with pytest.raises(TypeError):
getattr(modin_df, op)(4.0)
else:
modin_result = getattr(modin_df, op)(4.0)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)("a")
except TypeError:
with pytest.raises(TypeError):
repr(getattr(modin_df, op)("a"))
else:
modin_result = getattr(modin_df, op)("a")
df_equals(modin_result, pandas_result)
frame_data = {
"{}_other".format(modin_df.columns[0]): [0, 2],
modin_df.columns[0]: [0, 19],
modin_df.columns[1]: [1, 1],
}
modin_df2 = pd.DataFrame(frame_data)
pandas_df2 = pandas.DataFrame(frame_data)
try:
pandas_result = getattr(pandas_df, op)(pandas_df2)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df2)
else:
modin_result = getattr(modin_df, op)(modin_df2)
df_equals(modin_result, pandas_result)
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in modin_df.index]
)
modin_df_multi_level = modin_df.copy()
modin_df_multi_level.index = new_idx
# Defaults to pandas
with pytest.warns(UserWarning):
# Operation against self for sanity check
getattr(modin_df_multi_level, op)(modin_df_multi_level, axis=0, level=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_eq(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "eq")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ge(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "ge")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_gt(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "gt")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_le(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "le")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_lt(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "lt")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ne(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "ne")
# END test inter-DataFrame comparison operations
# Test dataframe right operations
def inter_df_math_right_ops_helper(self, modin_df, pandas_df, op):
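# Exercise a right-handed (r*) arithmetic operator against int and float scalars,
# then against a MultiIndex level, which currently defaults to pandas and should
# raise a UserWarning.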
try:
pandas_result = getattr(pandas_df, op)(4)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4)
else:
modin_result = getattr(modin_df, op)(4)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)(4.0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4.0)
else:
modin_result = getattr(modin_df, op)(4.0)
df_equals(modin_result, pandas_result)
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in modin_df.index]
)
modin_df_multi_level = modin_df.copy()
modin_df_multi_level.index = new_idx
# Defaults to pandas
with pytest.warns(UserWarning):
# Operation against self for sanity check
getattr(modin_df_multi_level, op)(modin_df_multi_level, axis=0, level=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_radd(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "radd")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rdiv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rdiv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rfloordiv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rfloordiv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rmod(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rmod")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rmul(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rmul")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rpow(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# TODO: Revert to the other datasets once we have an efficient way of preprocessing for positive values.
# Until then we cannot guarantee correct handling of negative integers, so the 100x100 case is skipped.
if "100x100" not in request.node.name:
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rpow")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rsub(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rsub")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rtruediv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rtruediv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rsub__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "__rsub__")
# END test dataframe right operations
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_abs(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.abs()
except Exception as e:
with pytest.raises(type(e)):
modin_df.abs()
else:
modin_result = modin_df.abs()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add_prefix(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
test_prefix = "TEST"
new_modin_df = modin_df.add_prefix(test_prefix)
new_pandas_df = pandas_df.add_prefix(test_prefix)
df_equals(new_modin_df.columns, new_pandas_df.columns)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("testfunc", test_func_values, ids=test_func_keys)
def test_applymap(self, request, data, testfunc):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
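# applymap expects a callable; passing a plain scalar should raise ValueError.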
with pytest.raises(ValueError):
x = 2
modin_df.applymap(x)
try:
pandas_result = pandas_df.applymap(testfunc)
except Exception as e:
with pytest.raises(type(e)):
modin_df.applymap(testfunc)
else:
modin_result = modin_df.applymap(testfunc)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("testfunc", test_func_values, ids=test_func_keys)
def test_applymap_numeric(self, request, data, testfunc):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
try:
pandas_result = pandas_df.applymap(testfunc)
except Exception as e:
with pytest.raises(type(e)):
modin_df.applymap(testfunc)
else:
modin_result = modin_df.applymap(testfunc)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add_suffix(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
test_suffix = "TEST"
new_modin_df = modin_df.add_suffix(test_suffix)
new_pandas_df = pandas_df.add_suffix(test_suffix)
df_equals(new_modin_df.columns, new_pandas_df.columns)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_at(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# We skip nan datasets because nan != nan
if "nan" not in request.node.name:
key1 = modin_df.columns[0]
# Scalar
assert modin_df.at[0, key1] == pandas_df.at[0, key1]
# Series
df_equals(modin_df.loc[0].at[key1], pandas_df.loc[0].at[key1])
# Write Item
modin_df_copy = modin_df.copy()
pandas_df_copy = pandas_df.copy()
modin_df_copy.at[1, key1] = modin_df.at[0, key1]
pandas_df_copy.at[1, key1] = pandas_df.at[0, key1]
df_equals(modin_df_copy, pandas_df_copy)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_axes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
for modin_axis, pd_axis in zip(modin_df.axes, pandas_df.axes):
assert np.array_equal(modin_axis, pd_axis)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_copy(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
# pandas_df is unused, but it is created so that there is no confusing
# list-comprehension logic in the pytest.mark.parametrize
new_modin_df = modin_df.copy()
assert new_modin_df is not modin_df
assert np.array_equal(
new_modin_df._query_compiler._modin_frame._partitions,
modin_df._query_compiler._modin_frame._partitions,
)
assert new_modin_df is not modin_df
df_equals(new_modin_df, modin_df)
# Shallow copy tests
modin_df = pd.DataFrame(data)
modin_df_cp = modin_df.copy(False)
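# A shallow copy shares data, so writes to the original are visible in the copy.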
modin_df[modin_df.columns[0]] = 0
df_equals(modin_df, modin_df_cp)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dtypes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.dtypes, pandas_df.dtypes)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ftypes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.ftypes, pandas_df.ftypes)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("key", indices_values, ids=indices_keys)
def test_get(self, data, key):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.get(key), pandas_df.get(key))
df_equals(
modin_df.get(key, default="default"), pandas_df.get(key, default="default")
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_get_dtype_counts(self, data):
modin_result = pd.DataFrame(data).get_dtype_counts().sort_index()
pandas_result = pandas.DataFrame(data).get_dtype_counts().sort_index()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
"dummy_na", bool_arg_values, ids=arg_keys("dummy_na", bool_arg_keys)
)
@pytest.mark.parametrize(
"drop_first", bool_arg_values, ids=arg_keys("drop_first", bool_arg_keys)
)
def test_get_dummies(self, request, data, dummy_na, drop_first):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas.get_dummies(
pandas_df, dummy_na=dummy_na, drop_first=drop_first
)
except Exception as e:
with pytest.raises(type(e)):
pd.get_dummies(modin_df, dummy_na=dummy_na, drop_first=drop_first)
else:
modin_result = pd.get_dummies(
modin_df, dummy_na=dummy_na, drop_first=drop_first
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_get_ftype_counts(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.get_ftype_counts(), pandas_df.get_ftype_counts())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_agg(self, data, axis, func):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.agg(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.agg(func, axis)
else:
modin_result = modin_df.agg(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_agg_numeric(self, request, data, axis, func):
if name_contains(request.node.name, numeric_agg_funcs) and name_contains(
request.node.name, numeric_dfs
):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.agg(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.agg(func, axis)
else:
modin_result = modin_df.agg(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_aggregate(self, request, data, func, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.aggregate(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.aggregate(func, axis)
else:
modin_result = modin_df.aggregate(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_aggregate_numeric(self, request, data, axis, func):
if name_contains(request.node.name, numeric_agg_funcs) and name_contains(
request.node.name, numeric_dfs
):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.agg(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.agg(func, axis)
else:
modin_result = modin_df.agg(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_aggregate_error_checking(self, data):
modin_df = pd.DataFrame(data)
assert modin_df.aggregate("ndim") == 2
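# The dict and "cumproduct" aggregations are expected to emit a UserWarning
# (they default to pandas); an unknown function name should raise ValueError.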
with pytest.warns(UserWarning):
modin_df.aggregate(
{modin_df.columns[0]: "sum", modin_df.columns[1]: "mean"}
)
with pytest.warns(UserWarning):
modin_df.aggregate("cumproduct")
with pytest.raises(ValueError):
modin_df.aggregate("NOT_EXISTS")
def test_align(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).align(pd.DataFrame(data))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"bool_only", bool_arg_values, ids=arg_keys("bool_only", bool_arg_keys)
)
def test_all(self, data, axis, skipna, bool_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.all(axis=axis, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.all(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.all(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
# Test when axis is None. This gets repeated below, but it is simpler than using a list in the parametrize decorator
try:
pandas_result = pandas_df.all(axis=None, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.all(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.all(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.all(
axis=axis, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.all(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.all(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
# Test when axis is None on the transposed frame, repeated for the same reason as above
try:
pandas_result = pandas_df.T.all(
axis=None, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.all(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.all(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
# test level
modin_df_multi_level = modin_df.copy()
pandas_df_multi_level = pandas_df.copy()
axis = modin_df._get_axis_number(axis) if axis is not None else 0
levels = 3
axis_names_list = [["a", "b", "c"], None]
for axis_names in axis_names_list:
if axis == 0:
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.index))],
names=axis_names,
)
modin_df_multi_level.index = new_idx
pandas_df_multi_level.index = new_idx
else:
new_col = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.columns))],
names=axis_names,
)
modin_df_multi_level.columns = new_col
pandas_df_multi_level.columns = new_col
for level in list(range(levels)) + (axis_names if axis_names else []):
try:
pandas_multi_level_result = pandas_df_multi_level.all(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.all(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
else:
modin_multi_level_result = modin_df_multi_level.all(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
df_equals(modin_multi_level_result, pandas_multi_level_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"bool_only", bool_arg_values, ids=arg_keys("bool_only", bool_arg_keys)
)
def test_any(self, data, axis, skipna, bool_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.any(axis=axis, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.any(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.any(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.any(axis=None, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.any(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.any(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.any(
axis=axis, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.any(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.any(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.any(
axis=None, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.any(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.any(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
# test level
modin_df_multi_level = modin_df.copy()
pandas_df_multi_level = pandas_df.copy()
axis = modin_df._get_axis_number(axis) if axis is not None else 0
levels = 3
axis_names_list = [["a", "b", "c"], None]
for axis_names in axis_names_list:
if axis == 0:
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.index))],
names=axis_names,
)
modin_df_multi_level.index = new_idx
pandas_df_multi_level.index = new_idx
else:
new_col = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.columns))],
names=axis_names,
)
modin_df_multi_level.columns = new_col
pandas_df_multi_level.columns = new_col
for level in list(range(levels)) + (axis_names if axis_names else []):
try:
pandas_multi_level_result = pandas_df_multi_level.any(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.any(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
else:
modin_multi_level_result = modin_df_multi_level.any(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
df_equals(modin_multi_level_result, pandas_multi_level_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_append(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
data_to_append = {"append_a": 2, "append_b": 1000}
ignore_idx_values = [True, False]
for ignore in ignore_idx_values:
try:
pandas_result = pandas_df.append(data_to_append, ignore_index=ignore)
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(data_to_append, ignore_index=ignore)
else:
modin_result = modin_df.append(data_to_append, ignore_index=ignore)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.append(pandas_df.iloc[-1])
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(modin_df.iloc[-1])
else:
modin_result = modin_df.append(modin_df.iloc[-1])
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.append(list(pandas_df.iloc[-1]))
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(list(modin_df.iloc[-1]))
else:
modin_result = modin_df.append(list(modin_df.iloc[-1]))
df_equals(modin_result, pandas_result)
verify_integrity_values = [True, False]
for verify_integrity in verify_integrity_values:
try:
pandas_result = pandas_df.append(
[pandas_df, pandas_df], verify_integrity=verify_integrity
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(
[modin_df, modin_df], verify_integrity=verify_integrity
)
else:
modin_result = modin_df.append(
[modin_df, modin_df], verify_integrity=verify_integrity
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.append(
pandas_df, verify_integrity=verify_integrity
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(modin_df, verify_integrity=verify_integrity)
else:
modin_result = modin_df.append(
modin_df, verify_integrity=verify_integrity
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_apply(self, request, data, func, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
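# Passing a dict of functions to apply along axis=1 is expected to raise TypeError.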
with pytest.raises(TypeError):
modin_df.apply({"row": func}, axis=1)
try:
pandas_result = pandas_df.apply(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.apply(func, axis)
else:
modin_result = modin_df.apply(func, axis)
df_equals(modin_result, pandas_result)
def test_apply_metadata(self):
def add(a, b, c):
return a + b + c
data = {"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]}
modin_df = pd.DataFrame(data)
modin_df["add"] = modin_df.apply(
lambda row: add(row["A"], row["B"], row["C"]), axis=1
)
pandas_df = pandas.DataFrame(data)
pandas_df["add"] = pandas_df.apply(
lambda row: add(row["A"], row["B"], row["C"]), axis=1
)
df_equals(modin_df, pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_apply_numeric(self, request, data, func, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
try:
pandas_result = pandas_df.apply(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.apply(func, axis)
else:
modin_result = modin_df.apply(func, axis)
df_equals(modin_result, pandas_result)
if "empty_data" not in request.node.name:
key = modin_df.columns[0]
modin_result = modin_df.apply(lambda df: df.drop(key), axis=1)
pandas_result = pandas_df.apply(lambda df: df.drop(key), axis=1)
df_equals(modin_result, pandas_result)
def test_as_blocks(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).as_blocks()
def test_as_matrix(self):
test_data = TestData()
frame = pd.DataFrame(test_data.frame)
mat = frame.as_matrix()
frame_columns = frame.columns
for i, row in enumerate(mat):
for j, value in enumerate(row):
col = frame_columns[j]
if np.isnan(value):
assert np.isnan(frame[col][i])
else:
assert value == frame[col][i]
# mixed type
mat = pd.DataFrame(test_data.mixed_frame).as_matrix(["foo", "A"])
assert mat[0, 0] == "bar"
df = pd.DataFrame({"real": [1, 2, 3], "complex": [1j, 2j, 3j]})
mat = df.as_matrix()
assert mat[0, 1] == 1j
# single block corner case
mat = pd.DataFrame(test_data.frame).as_matrix(["A", "B"])
expected = test_data.frame.reindex(columns=["A", "B"]).values
tm.assert_almost_equal(mat, expected)
def test_to_numpy(self):
test_data = TestData()
frame = pd.DataFrame(test_data.frame)
assert_array_equal(frame.values, test_data.frame.values)
def test_partition_to_numpy(self):
test_data = TestData()
frame = pd.DataFrame(test_data.frame)
for (
partition
) in frame._query_compiler._modin_frame._partitions.flatten().tolist():
assert_array_equal(partition.to_pandas().values, partition.to_numpy())
def test_asfreq(self):
index = pd.date_range("1/1/2000", periods=4, freq="T")
series = pd.Series([0.0, None, 2.0, 3.0], index=index)
df = pd.DataFrame({"s": series})
with pytest.warns(UserWarning):
# We are only testing that this defaults to pandas, so we will just check for
# the warning
df.asfreq(freq="30S")
def test_asof(self):
df = pd.DataFrame(
{"a": [10, 20, 30, 40, 50], "b": [None, None, None, None, 500]},
index=pd.DatetimeIndex(
[
"2018-02-27 09:01:00",
"2018-02-27 09:02:00",
"2018-02-27 09:03:00",
"2018-02-27 09:04:00",
"2018-02-27 09:05:00",
]
),
)
with pytest.warns(UserWarning):
df.asof(pd.DatetimeIndex(["2018-02-27 09:03:30", "2018-02-27 09:04:30"]))
def test_assign(self):
data = test_data_values[0]
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.warns(UserWarning):
modin_result = modin_df.assign(new_column=pd.Series(modin_df.iloc[:, 0]))
pandas_result = pandas_df.assign(new_column=pd.Series(pandas_df.iloc[:, 0]))
df_equals(modin_result, pandas_result)
def test_astype(self):
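# Cast a numeric frame to int32, float64, str, and category, then via a
# per-column dtype dict; casting a nonexistent column should raise KeyError.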
td = TestData()
modin_df = pd.DataFrame(
td.frame.values, index=td.frame.index, columns=td.frame.columns
)
expected_df = pandas.DataFrame(
td.frame.values, index=td.frame.index, columns=td.frame.columns
)
modin_df_casted = modin_df.astype(np.int32)
expected_df_casted = expected_df.astype(np.int32)
df_equals(modin_df_casted, expected_df_casted)
modin_df_casted = modin_df.astype(np.float64)
expected_df_casted = expected_df.astype(np.float64)
df_equals(modin_df_casted, expected_df_casted)
modin_df_casted = modin_df.astype(str)
expected_df_casted = expected_df.astype(str)
df_equals(modin_df_casted, expected_df_casted)
modin_df_casted = modin_df.astype("category")
expected_df_casted = expected_df.astype("category")
df_equals(modin_df_casted, expected_df_casted)
dtype_dict = {"A": np.int32, "B": np.int64, "C": str}
modin_df_casted = modin_df.astype(dtype_dict)
expected_df_casted = expected_df.astype(dtype_dict)
df_equals(modin_df_casted, expected_df_casted)
# Ignore lint because this is testing bad input
bad_dtype_dict = {"B": np.int32, "B": np.int64, "B": str} # noqa F601
modin_df_casted = modin_df.astype(bad_dtype_dict)
expected_df_casted = expected_df.astype(bad_dtype_dict)
df_equals(modin_df_casted, expected_df_casted)
with pytest.raises(KeyError):
modin_df.astype({"not_exists": np.uint8})
def test_astype_category(self):
modin_df = pd.DataFrame(
{"col1": ["A", "A", "B", "B", "A"], "col2": [1, 2, 3, 4, 5]}
)
pandas_df = pandas.DataFrame(
{"col1": ["A", "A", "B", "B", "A"], "col2": [1, 2, 3, 4, 5]}
)
modin_result = modin_df.astype({"col1": "category"})
pandas_result = pandas_df.astype({"col1": "category"})
df_equals(modin_result, pandas_result)
assert modin_result.dtypes.equals(pandas_result.dtypes)
modin_result = modin_df.astype("category")
pandas_result = pandas_df.astype("category")
df_equals(modin_result, pandas_result)
assert modin_result.dtypes.equals(pandas_result.dtypes)
def test_at_time(self):
i = pd.date_range("2018-04-09", periods=4, freq="12H")
ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i)
with pytest.warns(UserWarning):
ts.at_time("12:00")
def test_between_time(self):
i = pd.date_range("2018-04-09", periods=4, freq="12H")
ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i)
with pytest.warns(UserWarning):
ts.between_time("0:15", "0:45")
def test_bfill(self):
test_data = TestData()
test_data.tsframe["A"][:5] = np.nan
test_data.tsframe["A"][-5:] = np.nan
modin_df = pd.DataFrame(test_data.tsframe)
df_equals(modin_df.bfill(), test_data.tsframe.bfill())
def test_blocks(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).blocks
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_bool(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(ValueError):
modin_df.bool()
modin_df.__bool__()
single_bool_pandas_df = pandas.DataFrame([True])
single_bool_modin_df = pd.DataFrame([True])
assert single_bool_pandas_df.bool() == single_bool_modin_df.bool()
with pytest.raises(ValueError):
# __bool__ always raises this error for DataFrames
single_bool_modin_df.__bool__()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_boxplot(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
assert modin_df.boxplot() == to_pandas(modin_df).boxplot()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_clip(self, request, data, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
ind_len = (
len(modin_df.index)
if not pandas.DataFrame()._get_axis_number(axis)
else len(modin_df.columns)
)
# set bounds
lower, upper = np.sort(random_state.random_integers(RAND_LOW, RAND_HIGH, 2))
lower_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
upper_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
# test only upper scalar bound
modin_result = modin_df.clip(None, upper, axis=axis)
pandas_result = pandas_df.clip(None, upper, axis=axis)
df_equals(modin_result, pandas_result)
# test lower and upper scalar bound
modin_result = modin_df.clip(lower, upper, axis=axis)
pandas_result = pandas_df.clip(lower, upper, axis=axis)
df_equals(modin_result, pandas_result)
# test lower and upper list bound on each column
modin_result = modin_df.clip(lower_list, upper_list, axis=axis)
pandas_result = pandas_df.clip(lower_list, upper_list, axis=axis)
df_equals(modin_result, pandas_result)
# test only upper list bound on each column
modin_result = modin_df.clip(np.nan, upper_list, axis=axis)
pandas_result = pandas_df.clip(np.nan, upper_list, axis=axis)
df_equals(modin_result, pandas_result)
with pytest.raises(ValueError):
modin_df.clip(lower=[1, 2, 3], axis=None)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_clip_lower(self, request, data, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
ind_len = (
len(modin_df.index)
if not pandas.DataFrame()._get_axis_number(axis)
else len(modin_df.columns)
)
# set bounds
lower = random_state.random_integers(RAND_LOW, RAND_HIGH, 1)[0]
lower_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
# test lower scalar bound
pandas_result = pandas_df.clip_lower(lower, axis=axis)
modin_result = modin_df.clip_lower(lower, axis=axis)
df_equals(modin_result, pandas_result)
# test lower list bound on each column
pandas_result = pandas_df.clip_lower(lower_list, axis=axis)
modin_result = modin_df.clip_lower(lower_list, axis=axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_clip_upper(self, request, data, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
ind_len = (
len(modin_df.index)
if not pandas.DataFrame()._get_axis_number(axis)
else len(modin_df.columns)
)
# set bounds
upper = random_state.random_integers(RAND_LOW, RAND_HIGH, 1)[0]
upper_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
# test upper scalar bound
modin_result = modin_df.clip_upper(upper, axis=axis)
pandas_result = pandas_df.clip_upper(upper, axis=axis)
df_equals(modin_result, pandas_result)
# test upper list bound on each column
modin_result = modin_df.clip_upper(upper_list, axis=axis)
pandas_result = pandas_df.clip_upper(upper_list, axis=axis)
df_equals(modin_result, pandas_result)
def test_combine(self):
df1 = pd.DataFrame({"A": [0, 0], "B": [4, 4]})
df2 = pd.DataFrame({"A": [1, 1], "B": [3, 3]})
with pytest.warns(UserWarning):
df1.combine(df2, lambda s1, s2: s1 if s1.sum() < s2.sum() else s2)
def test_combine_first(self):
df1 = pd.DataFrame({"A": [None, 0], "B": [None, 4]})
df2 = pd.DataFrame({"A": [1, 1], "B": [3, 3]})
with pytest.warns(UserWarning):
df1.combine_first(df2)
def test_compound(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).compound()
def test_corr(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).corr()
def test_corrwith(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).corrwith(pd.DataFrame(data))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_count(self, request, data, axis, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.count(axis=axis, numeric_only=numeric_only)
pandas_result = pandas_df.count(axis=axis, numeric_only=numeric_only)
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.count(axis=axis, numeric_only=numeric_only)
pandas_result = pandas_df.T.count(axis=axis, numeric_only=numeric_only)
df_equals(modin_result, pandas_result)
# test level
modin_df_multi_level = modin_df.copy()
pandas_df_multi_level = pandas_df.copy()
axis = modin_df._get_axis_number(axis) if axis is not None else 0
levels = 3
axis_names_list = [["a", "b", "c"], None]
for axis_names in axis_names_list:
if axis == 0:
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.index))],
names=axis_names,
)
modin_df_multi_level.index = new_idx
pandas_df_multi_level.index = new_idx
try: # test error
pandas_df_multi_level.count(
axis=1, numeric_only=numeric_only, level=0
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.count(
axis=1, numeric_only=numeric_only, level=0
)
else:
new_col = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.columns))],
names=axis_names,
)
modin_df_multi_level.columns = new_col
pandas_df_multi_level.columns = new_col
try: # test error
pandas_df_multi_level.count(
axis=0, numeric_only=numeric_only, level=0
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.count(
axis=0, numeric_only=numeric_only, level=0
)
for level in list(range(levels)) + (axis_names if axis_names else []):
modin_multi_level_result = modin_df_multi_level.count(
axis=axis, numeric_only=numeric_only, level=level
)
pandas_multi_level_result = pandas_df_multi_level.count(
axis=axis, numeric_only=numeric_only, level=level
)
df_equals(modin_multi_level_result, pandas_multi_level_result)
def test_cov(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).cov()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cummax(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.cummax(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cummax(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cummax(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.cummax(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cummax(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cummax(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cummin(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.cummin(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cummin(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cummin(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.cummin(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cummin(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cummin(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cumprod(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.cumprod(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cumprod(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cumprod(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.cumprod(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cumprod(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cumprod(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cumsum(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# pandas exhibits weird behavior for this case.
# Remove this special case once we can pull the error messages from the backend.
if name_contains(request.node.name, ["datetime_timedelta_data"]) and (
axis == 0 or axis == "rows"
):
with pytest.raises(TypeError):
modin_df.cumsum(axis=axis, skipna=skipna)
else:
try:
pandas_result = pandas_df.cumsum(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cumsum(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cumsum(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
if name_contains(request.node.name, ["datetime_timedelta_data"]) and (
axis == 0 or axis == "rows"
):
with pytest.raises(TypeError):
modin_df.T.cumsum(axis=axis, skipna=skipna)
else:
try:
pandas_result = pandas_df.T.cumsum(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cumsum(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cumsum(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_describe(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.describe(), pandas_df.describe())
percentiles = [0.10, 0.11, 0.44, 0.78, 0.99]
df_equals(
modin_df.describe(percentiles=percentiles),
pandas_df.describe(percentiles=percentiles),
)
try:
pandas_result = pandas_df.describe(exclude=[np.float64])
except Exception as e:
with pytest.raises(type(e)):
modin_df.describe(exclude=[np.float64])
else:
modin_result = modin_df.describe(exclude=[np.float64])
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.describe(exclude=np.float64)
except Exception as e:
with pytest.raises(type(e)):
modin_df.describe(exclude=np.float64)
else:
modin_result = modin_df.describe(exclude=np.float64)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.describe(
include=[np.timedelta64, np.datetime64, np.object, np.bool]
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.describe(
include=[np.timedelta64, np.datetime64, np.object, np.bool]
)
else:
modin_result = modin_df.describe(
include=[np.timedelta64, np.datetime64, np.object, np.bool]
)
df_equals(modin_result, pandas_result)
modin_result = modin_df.describe(include=str(modin_df.dtypes.values[0]))
pandas_result = pandas_df.describe(include=str(pandas_df.dtypes.values[0]))
df_equals(modin_result, pandas_result)
modin_result = modin_df.describe(include=[np.number])
pandas_result = pandas_df.describe(include=[np.number])
df_equals(modin_result, pandas_result)
df_equals(modin_df.describe(include="all"), pandas_df.describe(include="all"))
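# An all-string frame exercises the object-dtype describe path (count/unique/top/freq).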
modin_df = pd.DataFrame(data).applymap(str)
pandas_df = pandas.DataFrame(data).applymap(str)
try:
df_equals(modin_df.describe(), pandas_df.describe())
except AssertionError:
# We have to do this because we choose the highest count slightly differently
# than pandas. Since there is no guarantee which value will be listed first,
# if the full results don't match, at least make sure that `count`, `unique`, and `freq` agree.
df_equals(
modin_df.describe().loc[["count", "unique", "freq"]],
pandas_df.describe().loc[["count", "unique", "freq"]],
)
def test_describe_dtypes(self):
modin_df = pd.DataFrame(
{
"col1": list("abc"),
"col2": list("abc"),
"col3": list("abc"),
"col4": [1, 2, 3],
}
)
pandas_df = pandas.DataFrame(
{
"col1": list("abc"),
"col2": list("abc"),
"col3": list("abc"),
"col4": [1, 2, 3],
}
)
modin_result = modin_df.describe()
pandas_result = pandas_df.describe()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"periods", int_arg_values, ids=arg_keys("periods", int_arg_keys)
)
def test_diff(self, request, data, axis, periods):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.diff(axis=axis, periods=periods)
except Exception as e:
with pytest.raises(type(e)):
modin_df.diff(axis=axis, periods=periods)
else:
modin_result = modin_df.diff(axis=axis, periods=periods)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.diff(axis=axis, periods=periods)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.diff(axis=axis, periods=periods)
else:
modin_result = modin_df.T.diff(axis=axis, periods=periods)
df_equals(modin_result, pandas_result)
def test_drop(self):
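# Covers label- and axis-based drops, errors="ignore", non-unique labels,
# inplace drops, and a MultiIndex level drop, which defaults to pandas and warns.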
frame_data = {"A": [1, 2, 3, 4], "B": [0, 1, 2, 3]}
simple = pandas.DataFrame(frame_data)
modin_simple = pd.DataFrame(frame_data)
df_equals(modin_simple.drop("A", axis=1), simple[["B"]])
df_equals(modin_simple.drop(["A", "B"], axis="columns"), simple[[]])
df_equals(modin_simple.drop([0, 1, 3], axis=0), simple.loc[[2], :])
df_equals(modin_simple.drop([0, 3], axis="index"), simple.loc[[1, 2], :])
pytest.raises(ValueError, modin_simple.drop, 5)
pytest.raises(ValueError, modin_simple.drop, "C", 1)
pytest.raises(ValueError, modin_simple.drop, [1, 5])
pytest.raises(ValueError, modin_simple.drop, ["A", "C"], 1)
# errors = 'ignore'
df_equals(modin_simple.drop(5, errors="ignore"), simple)
df_equals(modin_simple.drop([0, 5], errors="ignore"), simple.loc[[1, 2, 3], :])
df_equals(modin_simple.drop("C", axis=1, errors="ignore"), simple)
df_equals(modin_simple.drop(["A", "C"], axis=1, errors="ignore"), simple[["B"]])
# non-unique
nu_df = pandas.DataFrame(
zip(range(3), range(-3, 1), list("abc")), columns=["a", "a", "b"]
)
modin_nu_df = pd.DataFrame(nu_df)
df_equals(modin_nu_df.drop("a", axis=1), nu_df[["b"]])
df_equals(modin_nu_df.drop("b", axis="columns"), nu_df["a"])
df_equals(modin_nu_df.drop([]), nu_df)
nu_df = nu_df.set_index(pandas.Index(["X", "Y", "X"]))
nu_df.columns = list("abc")
modin_nu_df = pd.DataFrame(nu_df)
df_equals(modin_nu_df.drop("X", axis="rows"), nu_df.loc[["Y"], :])
df_equals(modin_nu_df.drop(["X", "Y"], axis=0), nu_df.loc[[], :])
# inplace cache issue
frame_data = random_state.randn(10, 3)
df = pandas.DataFrame(frame_data, columns=list("abc"))
modin_df = pd.DataFrame(frame_data, columns=list("abc"))
expected = df[~(df.b > 0)]
modin_df.drop(labels=df[df.b > 0].index, inplace=True)
df_equals(modin_df, expected)
midx = pd.MultiIndex(
levels=[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
)
df = pd.DataFrame(
index=midx,
columns=["big", "small"],
data=[
[45, 30],
[200, 100],
[1.5, 1],
[30, 20],
[250, 150],
[1.5, 0.8],
[320, 250],
[1, 0.8],
[0.3, 0.2],
],
)
with pytest.warns(UserWarning):
df.drop(index="length", level=1)
def test_drop_api_equivalence(self):
# equivalence of the labels/axis and index/columns APIs
frame_data = [[1, 2, 3], [3, 4, 5], [5, 6, 7]]
modin_df = pd.DataFrame(
frame_data, index=["a", "b", "c"], columns=["d", "e", "f"]
)
modin_df1 = modin_df.drop("a")
modin_df2 = modin_df.drop(index="a")
df_equals(modin_df1, modin_df2)
modin_df1 = modin_df.drop("d", 1)
modin_df2 = modin_df.drop(columns="d")
df_equals(modin_df1, modin_df2)
modin_df1 = modin_df.drop(labels="e", axis=1)
modin_df2 = modin_df.drop(columns="e")
df_equals(modin_df1, modin_df2)
modin_df1 = modin_df.drop(["a"], axis=0)
modin_df2 = modin_df.drop(index=["a"])
df_equals(modin_df1, modin_df2)
modin_df1 = modin_df.drop(["a"], axis=0).drop(["d"], axis=1)
modin_df2 = modin_df.drop(index=["a"], columns=["d"])
df_equals(modin_df1, modin_df2)
with pytest.raises(ValueError):
modin_df.drop(labels="a", index="b")
with pytest.raises(ValueError):
modin_df.drop(labels="a", columns="b")
with pytest.raises(ValueError):
modin_df.drop(axis=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_drop_transpose(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.T.drop(columns=[0, 1, 2])
pandas_result = pandas_df.T.drop(columns=[0, 1, 2])
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.drop(index=["col3", "col1"])
pandas_result = pandas_df.T.drop(index=["col3", "col1"])
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.drop(columns=[0, 1, 2], index=["col3", "col1"])
pandas_result = pandas_df.T.drop(columns=[0, 1, 2], index=["col3", "col1"])
df_equals(modin_result, pandas_result)
def test_droplevel(self):
df = (
pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
.set_index([0, 1])
.rename_axis(["a", "b"])
)
df.columns = pd.MultiIndex.from_tuples(
[("c", "e"), ("d", "f")], names=["level_1", "level_2"]
)
with pytest.warns(UserWarning):
df.droplevel("a")
with pytest.warns(UserWarning):
df.droplevel("level_2", axis=1)
@pytest.mark.parametrize(
"data", test_data_with_duplicates_values, ids=test_data_with_duplicates_keys
)
@pytest.mark.parametrize(
"keep", ["last", "first", False], ids=["last", "first", "False"]
)
@pytest.mark.parametrize(
"subset", [None, ["col1", "col3", "col7"]], ids=["None", "subset"]
)
def test_drop_duplicates(self, data, keep, subset):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(
modin_df.drop_duplicates(keep=keep, inplace=False, subset=subset),
pandas_df.drop_duplicates(keep=keep, inplace=False, subset=subset),
)
modin_results = modin_df.drop_duplicates(keep=keep, inplace=True, subset=subset)
pandas_results = pandas_df.drop_duplicates(
keep=keep, inplace=True, subset=subset
)
df_equals(modin_results, pandas_results)
def test_drop_duplicates_with_missing_index_values(self):
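# The index values are non-contiguous; sorting and then dropping duplicates
# must still match pandas.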
data = {
"columns": ["value", "time", "id"],
"index": [
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
20,
21,
22,
23,
24,
25,
26,
27,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
],
"data": [
["3", 1279213398000.0, 88.0],
["3", 1279204682000.0, 88.0],
["0", 1245772835000.0, 448.0],
["0", 1270564258000.0, 32.0],
["0", 1267106669000.0, 118.0],
["7", 1300621123000.0, 5.0],
["0", 1251130752000.0, 957.0],
["0", 1311683506000.0, 62.0],
["9", 1283692698000.0, 89.0],
["9", 1270234253000.0, 64.0],
["0", 1285088818000.0, 50.0],
["0", 1218212725000.0, 695.0],
["2", 1383933968000.0, 348.0],
["0", 1368227625000.0, 257.0],
["1", 1454514093000.0, 446.0],
["1", 1428497427000.0, 134.0],
["1", 1459184936000.0, 568.0],
["1", 1502293302000.0, 599.0],
["1", 1491833358000.0, 829.0],
["1", 1485431534000.0, 806.0],
["8", 1351800505000.0, 101.0],
["0", 1357247721000.0, 916.0],
["0", 1335804423000.0, 370.0],
["24", 1327547726000.0, 720.0],
["0", 1332334140000.0, 415.0],
["0", 1309543100000.0, 30.0],
["18", 1309541141000.0, 30.0],
["0", 1298979435000.0, 48.0],
["14", 1276098160000.0, 59.0],
["0", 1233936302000.0, 109.0],
],
}
pandas_df = pandas.DataFrame(
data["data"], index=data["index"], columns=data["columns"]
)
modin_df = pd.DataFrame(
data["data"], index=data["index"], columns=data["columns"]
)
modin_result = modin_df.sort_values(["id", "time"]).drop_duplicates(["id"])
pandas_result = pandas_df.sort_values(["id", "time"]).drop_duplicates(["id"])
df_equals(modin_result, pandas_result)
def test_drop_duplicates_after_sort(self):
data = [
{"value": 1, "time": 2},
{"value": 1, "time": 1},
{"value": 2, "time": 1},
{"value": 2, "time": 2},
]
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.sort_values(["value", "time"]).drop_duplicates(
["value"]
)
pandas_result = pandas_df.sort_values(["value", "time"]).drop_duplicates(
["value"]
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("how", ["any", "all"], ids=["any", "all"])
def test_dropna(self, data, axis, how):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
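# Invalid `how`, `how=None` with `thresh=None`, and an unknown subset label are
# all expected to raise before the pandas comparison.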
with pytest.raises(ValueError):
modin_df.dropna(axis=axis, how="invalid")
with pytest.raises(TypeError):
modin_df.dropna(axis=axis, how=None, thresh=None)
with pytest.raises(KeyError):
modin_df.dropna(axis=axis, subset=["NotExists"], how=how)
modin_result = modin_df.dropna(axis=axis, how=how)
pandas_result = pandas_df.dropna(axis=axis, how=how)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_inplace(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_result = pandas_df.dropna()
modin_df.dropna(inplace=True)
df_equals(modin_df, pandas_result)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_df.dropna(thresh=2, inplace=True)
modin_df.dropna(thresh=2, inplace=True)
df_equals(modin_df, pandas_df)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_df.dropna(axis=1, how="any", inplace=True)
modin_df.dropna(axis=1, how="any", inplace=True)
df_equals(modin_df, pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_multiple_axes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(
modin_df.dropna(how="all", axis=[0, 1]),
pandas_df.dropna(how="all", axis=[0, 1]),
)
df_equals(
modin_df.dropna(how="all", axis=(0, 1)),
pandas_df.dropna(how="all", axis=(0, 1)),
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_multiple_axes_inplace(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df_copy = modin_df.copy()
pandas_df_copy = pandas_df.copy()
modin_df_copy.dropna(how="all", axis=[0, 1], inplace=True)
pandas_df_copy.dropna(how="all", axis=[0, 1], inplace=True)
df_equals(modin_df_copy, pandas_df_copy)
modin_df_copy = modin_df.copy()
pandas_df_copy = pandas_df.copy()
modin_df_copy.dropna(how="all", axis=(0, 1), inplace=True)
pandas_df_copy.dropna(how="all", axis=(0, 1), inplace=True)
df_equals(modin_df_copy, pandas_df_copy)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_subset(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if "empty_data" not in request.node.name:
column_subset = modin_df.columns[0:2]
df_equals(
modin_df.dropna(how="all", subset=column_subset),
pandas_df.dropna(how="all", subset=column_subset),
)
df_equals(
modin_df.dropna(how="any", subset=column_subset),
pandas_df.dropna(how="any", subset=column_subset),
)
row_subset = modin_df.index[0:2]
df_equals(
modin_df.dropna(how="all", axis=1, subset=row_subset),
pandas_df.dropna(how="all", axis=1, subset=row_subset),
)
df_equals(
modin_df.dropna(how="any", axis=1, subset=row_subset),
pandas_df.dropna(how="any", axis=1, subset=row_subset),
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_subset_error(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
# pandas_df is unused, but it is created so that there is no confusing
# list-comprehension logic in the pytest.mark.parametrize
with pytest.raises(KeyError):
modin_df.dropna(subset=list("EF"))
if len(modin_df.columns) < 5:
with pytest.raises(KeyError):
modin_df.dropna(axis=1, subset=[4, 5])
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dot(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
col_len = len(modin_df.columns)
# Test list input
arr = np.arange(col_len)
modin_result = modin_df.dot(arr)
pandas_result = pandas_df.dot(arr)
df_equals(modin_result, pandas_result)
# Test bad dimensions
with pytest.raises(ValueError):
modin_result = modin_df.dot(np.arange(col_len + 10))
# Test series input
modin_series = pd.Series(np.arange(col_len), index=modin_df.columns)
pandas_series = pandas.Series(np.arange(col_len), index=modin_df.columns)
modin_result = modin_df.dot(modin_series)
pandas_result = pandas_df.dot(pandas_series)
df_equals(modin_result, pandas_result)
# Test when input series index doesn't line up with columns
with pytest.raises(ValueError):
modin_result = modin_df.dot(pd.Series(np.arange(col_len)))
with pytest.warns(UserWarning):
modin_df.dot(modin_df.T)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
"keep", ["last", "first", False], ids=["last", "first", "False"]
)
def test_duplicated(self, data, keep):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_result = pandas_df.duplicated(keep=keep)
modin_result = modin_df.duplicated(keep=keep)
df_equals(modin_result, pandas_result)
import random
subset = random.sample(
list(pandas_df.columns), random.randint(1, len(pandas_df.columns))
)
pandas_result = pandas_df.duplicated(keep=keep, subset=subset)
modin_result = modin_df.duplicated(keep=keep, subset=subset)
df_equals(modin_result, pandas_result)
def test_empty_df(self):
df = pd.DataFrame(index=["a", "b"])
df_is_empty(df)
tm.assert_index_equal(df.index, pd.Index(["a", "b"]))
assert len(df.columns) == 0
df = pd.DataFrame(columns=["a", "b"])
df_is_empty(df)
assert len(df.index) == 0
tm.assert_index_equal(df.columns, pd.Index(["a", "b"]))
df = pd.DataFrame()
df_is_empty(df)
assert len(df.index) == 0
assert len(df.columns) == 0
def test_equals(self):
frame_data = {"col1": [2.9, 3, 3, 3], "col2": [2, 3, 4, 1]}
modin_df1 = pd.DataFrame(frame_data)
modin_df2 = pd.DataFrame(frame_data)
assert modin_df1.equals(modin_df2)
df_equals(modin_df1, modin_df2)
df_equals(modin_df1, pd.DataFrame(modin_df1))
frame_data = {"col1": [2.9, 3, 3, 3], "col2": [2, 3, 5, 1]}
modin_df3 = pd.DataFrame(frame_data, index=list("abcd"))
assert not modin_df1.equals(modin_df3)
with pytest.raises(AssertionError):
df_equals(modin_df3, modin_df1)
with pytest.raises(AssertionError):
df_equals(modin_df3, modin_df2)
assert modin_df1.equals(modin_df2._query_compiler.to_pandas())
def test_eval_df_use_case(self):
frame_data = {"a": random_state.randn(10), "b": random_state.randn(10)}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
# test eval for series results
tmp_pandas = df.eval("arctan2(sin(a), b)", engine="python", parser="pandas")
tmp_modin = modin_df.eval(
"arctan2(sin(a), b)", engine="python", parser="pandas"
)
assert isinstance(tmp_modin, pd.Series)
df_equals(tmp_modin, tmp_pandas)
# Test not inplace assignments
tmp_pandas = df.eval("e = arctan2(sin(a), b)", engine="python", parser="pandas")
tmp_modin = modin_df.eval(
"e = arctan2(sin(a), b)", engine="python", parser="pandas"
)
df_equals(tmp_modin, tmp_pandas)
# Test inplace assignments
df.eval(
"e = arctan2(sin(a), b)", engine="python", parser="pandas", inplace=True
)
modin_df.eval(
"e = arctan2(sin(a), b)", engine="python", parser="pandas", inplace=True
)
# TODO: Use a series equality validator.
df_equals(modin_df, df)
def test_eval_df_arithmetic_subexpression(self):
frame_data = {"a": random_state.randn(10), "b": random_state.randn(10)}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
df.eval("not_e = sin(a + b)", engine="python", parser="pandas", inplace=True)
modin_df.eval(
"not_e = sin(a + b)", engine="python", parser="pandas", inplace=True
)
# TODO: Use a series equality validator.
df_equals(modin_df, df)
def test_ewm(self):
df = pd.DataFrame({"B": [0, 1, 2, np.nan, 4]})
with pytest.warns(UserWarning):
df.ewm(com=0.5).mean()
def test_expanding(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).expanding()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_explode(self, data):
modin_df = pd.DataFrame(data)
with pytest.warns(UserWarning):
modin_df.explode(modin_df.columns[0])
def test_ffill(self):
test_data = TestData()
test_data.tsframe["A"][:5] = np.nan
test_data.tsframe["A"][-5:] = np.nan
modin_df = pd.DataFrame(test_data.tsframe)
df_equals(modin_df.ffill(), test_data.tsframe.ffill())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
"method",
["backfill", "bfill", "pad", "ffill", None],
ids=["backfill", "bfill", "pad", "ffill", "None"],
)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("limit", int_arg_values, ids=int_arg_keys)
def test_fillna(self, data, method, axis, limit):
# We are not testing when limit is not positive until pandas-27042 gets fixed.
# We are not testing when axis is over rows until pandas-17399 gets fixed.
if limit > 0 and axis != 1 and axis != "columns":
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.fillna(
0, method=method, axis=axis, limit=limit
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.fillna(0, method=method, axis=axis, limit=limit)
else:
modin_result = modin_df.fillna(0, method=method, axis=axis, limit=limit)
df_equals(modin_result, pandas_result)
def test_fillna_sanity(self):
test_data = TestData()
tf = test_data.tsframe
tf.loc[tf.index[:5], "A"] = np.nan
tf.loc[tf.index[-5:], "A"] = np.nan
zero_filled = test_data.tsframe.fillna(0)
modin_df = pd.DataFrame(test_data.tsframe).fillna(0)
df_equals(modin_df, zero_filled)
padded = test_data.tsframe.fillna(method="pad")
modin_df = pd.DataFrame(test_data.tsframe).fillna(method="pad")
df_equals(modin_df, padded)
# mixed type
mf = test_data.mixed_frame
mf.loc[mf.index[5:20], "foo"] = np.nan
mf.loc[mf.index[-10:], "A"] = np.nan
result = test_data.mixed_frame.fillna(value=0)
modin_df = pd.DataFrame(test_data.mixed_frame).fillna(value=0)
df_equals(modin_df, result)
result = test_data.mixed_frame.fillna(method="pad")
modin_df = pd.DataFrame(test_data.mixed_frame).fillna(method="pad")
df_equals(modin_df, result)
pytest.raises(ValueError, test_data.tsframe.fillna)
pytest.raises(ValueError, pd.DataFrame(test_data.tsframe).fillna)
with pytest.raises(ValueError):
pd.DataFrame(test_data.tsframe).fillna(5, method="ffill")
# mixed numeric (but no float16)
mf = test_data.mixed_float.reindex(columns=["A", "B", "D"])
mf.loc[mf.index[-10:], "A"] = np.nan
result = mf.fillna(value=0)
modin_df = pd.DataFrame(mf).fillna(value=0)
df_equals(modin_df, result)
result = mf.fillna(method="pad")
modin_df = pd.DataFrame(mf).fillna(method="pad")
df_equals(modin_df, result)
# TODO: Use this when Arrow issue resolves:
# (https://issues.apache.org/jira/browse/ARROW-2122)
# empty frame
# df = DataFrame(columns=['x'])
# for m in ['pad', 'backfill']:
# df.x.fillna(method=m, inplace=True)
# df.x.fillna(method=m)
# with different dtype
frame_data = [
["a", "a", np.nan, "a"],
["b", "b", np.nan, "b"],
["c", "c", np.nan, "c"],
]
df = pandas.DataFrame(frame_data)
result = df.fillna({2: "foo"})
modin_df = pd.DataFrame(frame_data).fillna({2: "foo"})
df_equals(modin_df, result)
modin_df = pd.DataFrame(df)
df.fillna({2: "foo"}, inplace=True)
modin_df.fillna({2: "foo"}, inplace=True)
df_equals(modin_df, result)
frame_data = {
"Date": [pandas.NaT, pandas.Timestamp("2014-1-1")],
"Date2": [pandas.Timestamp("2013-1-1"), pandas.NaT],
}
df = pandas.DataFrame(frame_data)
result = df.fillna(value={"Date": df["Date2"]})
modin_df = pd.DataFrame(frame_data).fillna(value={"Date": df["Date2"]})
df_equals(modin_df, result)
# TODO: Use this when Arrow issue resolves:
# (https://issues.apache.org/jira/browse/ARROW-2122)
# with timezone
"""
frame_data = {'A': [pandas.Timestamp('2012-11-11 00:00:00+01:00'),
pandas.NaT]}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
df_equals(modin_df.fillna(method='pad'), df.fillna(method='pad'))
frame_data = {'A': [pandas.NaT,
pandas.Timestamp('2012-11-11 00:00:00+01:00')]}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data).fillna(method='bfill')
df_equals(modin_df, df.fillna(method='bfill'))
"""
def test_fillna_downcast(self):
# infer int64 from float64
frame_data = {"a": [1.0, np.nan]}
df = pandas.DataFrame(frame_data)
result = df.fillna(0, downcast="infer")
modin_df = pd.DataFrame(frame_data).fillna(0, downcast="infer")
df_equals(modin_df, result)
# infer int64 from float64 when fillna value is a dict
df = pandas.DataFrame(frame_data)
result = df.fillna({"a": 0}, downcast="infer")
modin_df = pd.DataFrame(frame_data).fillna({"a": 0}, downcast="infer")
df_equals(modin_df, result)
def test_ffill2(self):
test_data = TestData()
test_data.tsframe["A"][:5] = np.nan
test_data.tsframe["A"][-5:] = np.nan
modin_df = pd.DataFrame(test_data.tsframe)
df_equals(
modin_df.fillna(method="ffill"), test_data.tsframe.fillna(method="ffill")
)
def test_bfill2(self):
test_data = TestData()
test_data.tsframe["A"][:5] = np.nan
test_data.tsframe["A"][-5:] = np.nan
modin_df = pd.DataFrame(test_data.tsframe)
df_equals(
modin_df.fillna(method="bfill"), test_data.tsframe.fillna(method="bfill")
)
def test_fillna_inplace(self):
frame_data = random_state.randn(10, 4)
df = pandas.DataFrame(frame_data)
df[1][:4] = np.nan
df[3][-4:] = np.nan
modin_df = pd.DataFrame(df)
df.fillna(value=0, inplace=True)
try:
df_equals(modin_df, df)
except AssertionError:
pass
else:
assert False
modin_df.fillna(value=0, inplace=True)
df_equals(modin_df, df)
modin_df = pd.DataFrame(df).fillna(value={0: 0}, inplace=True)
assert modin_df is None
df[1][:4] = np.nan
df[3][-4:] = np.nan
modin_df = pd.DataFrame(df)
df.fillna(method="ffill", inplace=True)
try:
df_equals(modin_df, df)
except AssertionError:
pass
else:
assert False
modin_df.fillna(method="ffill", inplace=True)
df_equals(modin_df, df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_frame_fillna_limit(self, data):
pandas_df = pandas.DataFrame(data)
index = pandas_df.index
result = pandas_df[:2].reindex(index)
modin_df = pd.DataFrame(result)
df_equals(
modin_df.fillna(method="pad", limit=2), result.fillna(method="pad", limit=2)
)
result = pandas_df[-2:].reindex(index)
modin_df = pd.DataFrame(result)
df_equals(
modin_df.fillna(method="backfill", limit=2),
result.fillna(method="backfill", limit=2),
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_frame_pad_backfill_limit(self, data):
pandas_df = pandas.DataFrame(data)
index = pandas_df.index
result = pandas_df[:2].reindex(index)
modin_df = pd.DataFrame(result)
df_equals(
modin_df.fillna(method="pad", limit=2), result.fillna(method="pad", limit=2)
)
result = pandas_df[-2:].reindex(index)
modin_df = pd.DataFrame(result)
df_equals(
modin_df.fillna(method="backfill", limit=2),
result.fillna(method="backfill", limit=2),
)
def test_fillna_dtype_conversion(self):
# make sure that fillna on an empty frame works
df = pandas.DataFrame(index=range(3), columns=["A", "B"], dtype="float64")
modin_df = pd.DataFrame(index=range(3), columns=["A", "B"], dtype="float64")
df_equals(modin_df.fillna("nan"), df.fillna("nan"))
frame_data = {"A": [1, np.nan], "B": [1.0, 2.0]}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
for v in ["", 1, np.nan, 1.0]:
df_equals(modin_df.fillna(v), df.fillna(v))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_fillna_skip_certain_blocks(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# don't try to fill boolean, int blocks
df_equals(modin_df.fillna(np.nan), pandas_df.fillna(np.nan))
def test_fillna_dict_series(self):
frame_data = {
"a": [np.nan, 1, 2, np.nan, np.nan],
"b": [1, 2, 3, np.nan, np.nan],
"c": [np.nan, 1, 2, 3, 4],
}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
df_equals(modin_df.fillna({"a": 0, "b": 5}), df.fillna({"a": 0, "b": 5}))
df_equals(
modin_df.fillna({"a": 0, "b": 5, "d": 7}),
df.fillna({"a": 0, "b": 5, "d": 7}),
)
# Series treated same as dict
df_equals(modin_df.fillna(modin_df.max()), df.fillna(df.max()))
def test_fillna_dataframe(self):
frame_data = {
"a": [np.nan, 1, 2, np.nan, np.nan],
"b": [1, 2, 3, np.nan, np.nan],
"c": [np.nan, 1, 2, 3, 4],
}
df = pandas.DataFrame(frame_data, index=list("VWXYZ"))
modin_df = pd.DataFrame(frame_data, index=list("VWXYZ"))
# df2 may have different index and columns
df2 = pandas.DataFrame(
{
"a": [np.nan, 10, 20, 30, 40],
"b": [50, 60, 70, 80, 90],
"foo": ["bar"] * 5,
},
index=list("VWXuZ"),
)
modin_df2 = pd.DataFrame(df2)
# only those columns and indices which are shared get filled
df_equals(modin_df.fillna(modin_df2), df.fillna(df2))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_fillna_columns(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(
modin_df.fillna(method="ffill", axis=1),
pandas_df.fillna(method="ffill", axis=1),
)
df_equals(
modin_df.fillna(method="ffill", axis=1),
pandas_df.fillna(method="ffill", axis=1),
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_fillna_invalid_method(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
with tm.assert_raises_regex(ValueError, "ffil"):
modin_df.fillna(method="ffil")
def test_fillna_invalid_value(self):
test_data = TestData()
modin_df = pd.DataFrame(test_data.frame)
# list
pytest.raises(TypeError, modin_df.fillna, [1, 2])
# tuple
pytest.raises(TypeError, modin_df.fillna, (1, 2))
# frame with series
pytest.raises(TypeError, modin_df.iloc[:, 0].fillna, modin_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_fillna_col_reordering(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.fillna(method="ffill"), pandas_df.fillna(method="ffill"))
"""
TODO: Use this when Arrow issue resolves:
(https://issues.apache.org/jira/browse/ARROW-2122)
def test_fillna_datetime_columns(self):
frame_data = {'A': [-1, -2, np.nan],
'B': date_range('20130101', periods=3),
'C': ['foo', 'bar', None],
'D': ['foo2', 'bar2', None]}
df = pandas.DataFrame(frame_data, index=date_range('20130110', periods=3))
modin_df = pd.DataFrame(frame_data, index=date_range('20130110', periods=3))
df_equals(modin_df.fillna('?'), df.fillna('?'))
frame_data = {'A': [-1, -2, np.nan],
'B': [pandas.Timestamp('2013-01-01'),
pandas.Timestamp('2013-01-02'), pandas.NaT],
'C': ['foo', 'bar', None],
'D': ['foo2', 'bar2', None]}
df = pandas.DataFrame(frame_data, index=date_range('20130110', periods=3))
modin_df = pd.DataFrame(frame_data, index=date_range('20130110', periods=3))
df_equals(modin_df.fillna('?'), df.fillna('?'))
"""
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_filter(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
by = {"items": ["col1", "col5"], "regex": "4$|3$", "like": "col"}
df_equals(
modin_df.filter(items=by["items"]), pandas_df.filter(items=by["items"])
)
df_equals(
modin_df.filter(regex=by["regex"], axis=0),
pandas_df.filter(regex=by["regex"], axis=0),
)
df_equals(
modin_df.filter(regex=by["regex"], axis=1),
pandas_df.filter(regex=by["regex"], axis=1),
)
df_equals(modin_df.filter(like=by["like"]), pandas_df.filter(like=by["like"]))
with pytest.raises(TypeError):
modin_df.filter(items=by["items"], regex=by["regex"])
with pytest.raises(TypeError):
modin_df.filter()
def test_first(self):
i = pd.date_range("2018-04-09", periods=4, freq="2D")
ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i)
with pytest.warns(UserWarning):
ts.first("3D")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_first_valid_index(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
assert modin_df.first_valid_index() == (pandas_df.first_valid_index())
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_from_dict(self, data):
modin_df = pd.DataFrame(data) # noqa F841
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(NotImplementedError):
pd.DataFrame.from_dict(None)
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_from_items(self, data):
modin_df = pd.DataFrame(data) # noqa F841
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(NotImplementedError):
pd.DataFrame.from_items(None)
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_from_records(self, data):
modin_df = pd.DataFrame(data) # noqa F841
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(NotImplementedError):
pd.DataFrame.from_records(None)
def test_get_value(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).get_value(0, "col1")
def test_get_values(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).get_values()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("n", int_arg_values, ids=arg_keys("n", int_arg_keys))
def test_head(self, data, n):
# Test normal dataframe head
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.head(n), pandas_df.head(n))
df_equals(modin_df.head(len(modin_df) + 1), pandas_df.head(len(pandas_df) + 1))
# Test head when we call it from a QueryCompilerView
modin_result = modin_df.loc[:, ["col1", "col3", "col3"]].head(n)
pandas_result = pandas_df.loc[:, ["col1", "col3", "col3"]].head(n)
df_equals(modin_result, pandas_result)
def test_hist(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).hist(None)
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_iat(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(NotImplementedError):
modin_df.iat()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_idxmax(self, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = | pandas.DataFrame(data) | pandas.DataFrame |
from src.apply_gnn_to_datasets import eval_original, eval_conf, eval_sbm, eval_sbm_label, eval_modcm, eval_reglabel, eval_shuffled_features
from src.apply_gnn_to_datasets import eval_random, eval_erdos, eval_flipped, eval_removed_hubs, eval_added_2hop_edges, eval_random_features
from src.apply_gnn_to_datasets import eval_label_sbm, eval_injected_edges, eval_injected_edges_sbm, eval_injected_edges_constant_nodes
from src.apply_gnn_to_datasets import eval_sbm_swap, eval_injected_edges_degree_cat,eval_injected_edges_attack_target
from src.apply_gnn_to_datasets import eval_cm_communities, eval_modsbm, eval_shifting
import argparse
import pandas as pd
import os
import warnings
import numpy as np
warnings.filterwarnings("ignore")
def parse_args():
parser = argparse.ArgumentParser(description="Main GNN training script.")
parser.add_argument('--conf',
type=bool,
default=False,
help='Is configuration model evaluation. Default is False.')
parser.add_argument('--shifting',
type=bool,
default=False,
help='Is shifting evaluation. Default is False.')
parser.add_argument('--sbm',
type=bool,
default=False,
help='Is SBM evaluation. Default is False.')
parser.add_argument('--shuffled_features',
type=bool,
default=False,
help='Is shuffled features evaluation. Default is False.')
parser.add_argument('--random_features',
type=bool,
default=False,
help='Is random features evaluation. Default is False.')
parser.add_argument('--cm_communities',
type=bool,
default=False,
help='Is configuration model on communities evaluation. Default is False.')
parser.add_argument('--modcm',
type=bool,
default=False,
help='Is modified configuration model evaluation. Default is False.')
parser.add_argument('--modsbm',
type=bool,
default=False,
help='Is modified SBM evaluation. Default is False.')
parser.add_argument('--reglabel',
type=bool,
default=False,
help='Is label-based d-regular graph evaluation. Default is False.')
parser.add_argument('--sbm_label',
type=bool,
default=False,
help='Is SBM_label evaluation. Default is False.')
parser.add_argument('--sbm_swap',
type=bool,
default=False,
help='Is SBM evaluation with synthetic swapping. Default is False.')
parser.add_argument('--swap',
type=bool,
default=False,
help='Is it evaluation with synthetic swapping. Default is False.')
parser.add_argument('--random',
type=bool,
default=False,
help='Evaluating on a completely random d-regular graph?. Default is False.')
parser.add_argument('--erdos',
type=bool,
default=False,
help='Evaluating on a completely random Erdos-Renyi graph?. Default is False.')
parser.add_argument('--label_sbm',
type=bool,
default=False,
help='Evaluating on a completely label_sbm graph?. Default is False.')
parser.add_argument('--injected_edges',
type=bool,
default=False,
help='Evaluating on a completely injected_edges graph?. Default is False.')
parser.add_argument('--injected_edges_degree_cat',
type=bool,
default=False,
help='Evaluating on a completely injected_edges_degree_cat graph?. Default is False.')
parser.add_argument('--injected_edges_constant_nodes',
type=bool,
default=False,
help='Evaluating on a completely injected_edges_constant_nodes graph?. Default is False.')
parser.add_argument('--injected_edges_attack_target',
type=bool,
default=False,
help='Evaluating on a completely injected_edges_attack_target graph?. Default is False.')
parser.add_argument('--injected_edges_sbm',
type=bool,
default=False,
help='Evaluating on a completely injected_edges_sbm graph?. Default is False.')
parser.add_argument('--flipped',
type=bool,
default=False,
help='Evaluating with flipped edges? Default is False.')
parser.add_argument('--removed_hubs',
type=bool,
default=False,
help='Evaluating with removed hubs? Default is False.')
parser.add_argument('--added_2hop_edges',
type=bool,
default=False,
help='Evaluating with added 2-hop edges? Default is False.')
parser.add_argument('--hubs_experiment',
default='weak',
help='hubs experiment type (local_hubs, global_hubs, local_edges, global_edges). Default is weak.')
parser.add_argument('--datasets',
nargs='+',
help='datasets to process, e.g., --dataset cora pubmed')
parser.add_argument('--models',
nargs='+',
help='models to evaluate, e.g., --models gcn sage gat')
parser.add_argument('--splits',
type=int,
default=20,
help='Number of random train/validation/test splits. Default is 20.')
parser.add_argument('--runs',
type=int,
default=5,
help='Number of random initializations of the model. Default is 5.')
parser.add_argument('--train_examples',
type=int,
default=20,
help='Number of training examples per class. Default is 20.')
parser.add_argument('--val_examples',
type=int,
default=30,
help='Number of validation examples per class. Default is 30.')
parser.add_argument('--directionalities',
nargs='+',
default=['undirected'],
help='directionalities, example: --directionalities undirected directed reversed. Default: undirected')
args = parser.parse_args()
return args
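# Note on the flags above (an observation added here, not part of the original script):
# argparse's type=bool converts any non-empty string to True, so e.g. `--conf False`
# still enables the flag. A sketch of the more conventional pattern, shown only as an
# assumed alternative:
# parser.add_argument('--conf', action='store_true',
#                     help='Run the configuration model evaluation.')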
datasets_similar_to_pubmed = 'cora_full ms_academic_cs ms_academic_phy'.split()
def model_selection(model, dataset):
if model == 'gat' and dataset in datasets_similar_to_pubmed:
dataset = 'pubmed' # take pubmed hyperparams and apply them to cora_full
# if dataset == 'citeseer':
# dataset = 'cora' # take cora hyperparams and apply them to citeseer
filename = f'reports/results/eval/{model}_val_{dataset}_undirected.csv'
if not os.path.exists(filename):
filename = f'reports/results/eval/{model}.csv'
print(f'reading hyperparams from file {filename}')
df = | pd.read_csv(filename) | pandas.read_csv |
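# Worked example of the hyperparameter lookup above (paths taken from the code itself):
# model_selection('gat', 'cora_full') maps the dataset to 'pubmed', first tries
# 'reports/results/eval/gat_val_pubmed_undirected.csv', and falls back to
# 'reports/results/eval/gat.csv' if that file does not exist.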
# page 14
# ===============================
# Getting and checking dataset
# ===============================
from sklearn.datasets import load_iris
iris_dataset = load_iris()
# print("Keys of iris_dataset: \n{}".format(iris_dataset.keys()))
"""
Keys of iris_dataset:
dict_keys(['data', 'DESCR', 'filename', 'target_names', 'feature_names', 'target'])
"""
# print(iris_dataset['DESCR'][:193] + "\n ...")
"""
Iris plants dataset
--------------------
**Data Set Characteristics:**
:Number of Instances: 150 (50 in each of three classes)
:Number of Attributes: 4 numeric, pre
...
"""
# print("Target names: {}".format(iris_dataset['target_names']))
"""
Target names: ['setosa' 'versicolor' 'virginica']
"""
# print("Feature names: \n{}".format(iris_dataset['feature_names']))
"""
Feature names:
['sepal length (cm)', 'sepal width (cm)', 'petal length (cm)', 'petal width (cm)']
"""
# print("Type of data: {}".format(type(iris_dataset['data'])))
# print("Shape of data: {}".format(iris_dataset['data'].shape))
"""
Type of data: <class 'numpy.ndarray'>
Shape of data: (150, 4)
"""
# print("First five column of data:\n{}".format(iris_dataset['data'][:5]))
"""
First five column of data:
[[5.1 3.5 1.4 0.2]
[4.9 3. 1.4 0.2]
[4.7 3.2 1.3 0.2]
[4.6 3.1 1.5 0.2]
[5. 3.6 1.4 0.2]]
"""
# print("Type of target: {}".format(type(iris_dataset['target'])))
"""
Type of target: <class 'numpy.ndarray'>
"""
# print("Shape of target: {}".format(iris_dataset['target'].shape))
"""
Shape of target: (150,)
"""
# print("Target:\n{}".format(iris_dataset['target']))
# setosa: 0, versicolor: 1, virginica: 2
"""
Target:
[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 2
2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2
2 2]
"""
# ===================================
# Generating training and test data
# ===================================
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(iris_dataset['data'],
iris_dataset['target'],
random_state=0)
# print("X_train shape: {}".format(X_train.shape)) # matrix data for training
# print("y_train shape: {}".format(y_train.shape)) # labels (results) for training
"""
X_train shape: (112, 4)
y_train shape: (112,)
"""
# print("X_test shape: {}".format(X_test.shape)) # matrix data for test
# print("y_test shape: {}".format(y_test.shape)) # labels (results) for test
"""
X_test shape: (38, 4)
y_test shape: (38,)
"""
# ==================
# Observation data
# ==================
import pandas as pd
import matplotlib.pyplot as plt
iris_dataframe = | pd.DataFrame(X_train, columns=iris_dataset.feature_names) | pandas.DataFrame |
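# A likely next step in this walkthrough, added here as an assumed sketch (not shown in
# the excerpt above): visualize the training data as a pair plot colored by class label.
# grr = pd.plotting.scatter_matrix(iris_dataframe, c=y_train, figsize=(15, 15),
#                                  marker='o', hist_kwds={'bins': 20}, s=60, alpha=.8)
# plt.show()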
import matplotlib
matplotlib.use("TkAgg")
import matplotlib.pyplot as plt
plt.style.use("./Styles/Scientific.mplstyle")
import numpy as np
import pandas as pd
from plotting import plot_3D_scatter
def get_features(data):
features = {}
for key, values in data.items():
timestamps, counts = np.unique(values["Timestamp"], return_counts=True)
features[key] = np.array([timestamps, counts]).T
return features
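# Small worked example of the arrays built above (illustrative timestamps):
# values["Timestamp"] == [t0, t0, t1] gives np.unique(..., return_counts=True) == ([t0, t1], [2, 1]),
# so features[key] == [[t0, 2], [t1, 1]], i.e. one row per timestamp with its feature count.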
def get_octaves(data):
octaves = pd.DataFrame()
for index, (key, values) in enumerate(data.items()):
octave, counts = np.unique(values["Octave"], return_counts=True)
octaves.insert(index, key, counts)
return octaves
def feature_plot(features, xlims, ylims):
fig, ax = plt.subplots(figsize=(7, 2.0))
for key, value in features.items():
ax.plot(value[:, 0], value[:, 1], label=key)
ax.set_xlabel("Time [s]")
ax.set_ylabel("Num. of features [-]")
ax.set_ylim(ylims)
ax.set_xlim(xlims)
lg = fig.legend(bbox_to_anchor=(0.69, 1.0), loc="upper center", \
frameon=True, fancybox=False, ncol=3)
fr = lg.get_frame()
fr.set_edgecolor("black")
fr.set_facecolor("white")
fig.subplots_adjust(left=0.10, right=0.975, top=0.75, bottom=0.17, \
wspace=0.2, hspace=0.675)
return fig
def octave_plot(octave_data):
# Octave distribution.
margin = 0.4
lefts, rights, centers = [], [], []
fig, ax = plt.subplots(figsize=(7, 2.5))
for index, method in enumerate(octave_data):
counts = octave_data[method]
fracs = counts / max(counts)
levels = np.arange(1, len(counts)+1)
center = (1.0 + margin) * index
left = center - 0.5
right = center + 0.5
lefts.append(left)
centers.append(center)
rights.append(right)
ax.barh(levels, fracs, left=-fracs/2+center, height=0.8, color="b", \
label=method)
ax.set_ylabel("Image Pyramid Level [-]")
ax.set_yticks([1, 2, 3, 4, 5, 6, 7, 8], minor=False)
ax.set_yticklabels([1, 2, 3, 4, 5, 6, 7, 8])
ax.set_xticks(centers, minor=False)
ax.set_xlim([lefts[0]-margin, rights[-1]+margin])
ax.set_xticklabels([col for col in octave_data.columns])
fig.tight_layout()
return fig
def main():
directory = "/home/martin/dev/Trajectory/Data/SLAM/"
time_lims = [ 1611313305.76, 1611313730 ]
paths = {}
paths["Raw"] = directory + "RAW-Keypoint-Statistics.csv"
paths["BLF"] = directory + "BLF-Keypoint-Statistics.csv"
paths["HE"] = directory + "HE-Keypoint-Statistics.csv"
paths["CLAHE"] = directory + "CLAHE-Keypoint-Statistics.csv"
paths["UIENet"] = directory + "UIENet-Keypoint-Statistics.csv"
data = {}
data["Raw"] = pd.read_csv(paths["Raw"])
data["BLF"] = pd.read_csv(paths["BLF"])
data["HE-BLF"] = | pd.read_csv(paths["HE"]) | pandas.read_csv |
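# Minimal sketch of how main() presumably continues with the helpers above; the y-limits
# are an assumption, only `data` and `time_lims` come from the script itself:
# features = get_features(data)
# fig = feature_plot(features, time_lims, [0, 3000])
# octaves = get_octaves(data)
# fig_octaves = octave_plot(octaves)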
import os
import numpy as np
from urllib import request
import errno
import struct
import tarfile
import glob
import scipy.io as sio
from sklearn.utils.extmath import cartesian
from scipy.stats import laplace
import joblib
from spriteworld import factor_distributions as distribs
from spriteworld import renderers as spriteworld_renderers
from spriteworld import sprite
import csv
from collections import defaultdict
import ast
from scripts.data_analysis_utils import load_csv
import pandas as pd
from sklearn import preprocessing
from sklearn import utils
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision.datasets import ImageFolder
from torchvision.datasets.utils import download_url, check_integrity
from torchvision import transforms
from PIL import Image
import pickle
import h5py
from matplotlib import pyplot as plt
class TupleLoader(Dataset):
def __init__(self, k=-1, rate=1, prior='uniform', transform=None,
target_transform=None):
# k=-1 gives random number of changing factors as in Locatello k=rand
# rate=-1 gives random rate for each sample in Uniform(1,10)
self.index_manager = None # set in child class
self.factor_sizes = None # set in child class
self.categorical = None # set in child class
self.data = None # set in child class
self.prior = prior
self.transform = transform
self.target_transform = target_transform
self.rate = rate
self.k = k
if prior == 'laplace' and k != -1:
print('warning: setting k has no effect when prior=laplace. '
'Set k=-1 or leave it at the default to silence this warning.')
if prior == 'uniform' and rate != -1:
print('warning: setting rate has no effect when prior=uniform. '
'Set rate=-1 or leave it at the default to silence this warning.')
def __len__(self):
return len(self.data)
def sample_factors(self, num, random_state):
"""Sample a batch of ground-truth factors Y (needed by disentanglement_lib)."""
assert not(num % 2)
batch_size = int(num / 2)
indices = random_state.choice(self.__len__(), 2 * batch_size, replace=False)
batch, latents = [], []
for ind in indices:
_, _, l1, _ = self.__getitem__(ind)
latents.append(l1)
return np.stack(latents)
def sample_observations_from_factors(self, factors, random_state):
batch = []
for factor in factors:
sample_ind = self.index_manager.features_to_index(factor)
sample = self.data[sample_ind]
if self.transform:
sample = self.transform(sample)
if len(sample.shape) == 2: # set channel dim to 1
sample = sample[None]
if np.issubdtype(sample.dtype, np.uint8):
sample = sample.astype(np.float32) / 255.
batch.append(sample)
return np.stack(batch)
def sample(self, num, random_state):
#Sample a batch of factors Y and observations X
factors = self.sample_factors(num, random_state)
return factors, self.sample_observations_from_factors(factors, random_state)
def sample_observations(self, num, random_state):
#Sample a batch of observations X
return self.sample(num, random_state)[1]
def __getitem__(self, idx):
n_factors = len(self.factor_sizes)
first_sample = self.data[idx]
first_sample_feat = self.index_manager.index_to_features(idx)
if self.prior == 'uniform':
# only change up to k factors
if self.k == -1:
k = np.random.randint(1, n_factors) # number of factors which can change
else:
k = self.k
second_sample_feat = first_sample_feat.copy()
indices = np.random.choice(n_factors, k, replace=False)
for ind in indices:
x = np.arange(self.factor_sizes[ind])
p = np.ones_like(x) / (x.shape[0] - 1)
p[x == first_sample_feat[ind]] = 0 # dont pick same
second_sample_feat[ind] = np.random.choice(x, 1, p=p)
assert np.equal(first_sample_feat - second_sample_feat, 0).sum() == n_factors - k
elif self.prior == 'laplace':
second_sample_feat = self.truncated_laplace(first_sample_feat)
else:
raise NotImplementedError
second_sample_ind = self.index_manager.features_to_index(second_sample_feat)
second_sample = self.data[second_sample_ind]
if self.transform:
first_sample = self.transform(first_sample)
second_sample = self.transform(second_sample)
if len(first_sample.shape) == 2: # set channel dim to 1
first_sample = first_sample[None]
second_sample = second_sample[None]
if np.issubdtype(first_sample.dtype, np.uint8) or np.issubdtype(second_sample.dtype, np.uint8):
first_sample = first_sample.astype(np.float32) / 255.
second_sample = second_sample.astype(np.float32) / 255.
if self.target_transform:
first_sample_feat = self.target_transform(first_sample_feat)
second_sample_feat = self.target_transform(second_sample_feat)
return first_sample, second_sample, first_sample_feat, second_sample_feat
def truncated_laplace(self, start):
if self.rate == -1:
rate = np.random.uniform(1, 10, 1)[0]
else:
rate = self.rate
end = []
n_factors = len(self.factor_sizes)
for mean, upper in zip(start, np.array(self.factor_sizes)): # sample each feature individually
x = np.arange(upper)
p = laplace.pdf(x, loc=mean, scale=np.log(upper) / rate)
p /= np.sum(p)
end.append(np.random.choice(x, 1, p=p)[0])
end = np.array(end).astype(np.int)
end[self.categorical] = start[self.categorical] # don't change categorical factors s.a. shape
# make sure there is at least one change
if np.sum(abs(start - end)) == 0:
ind = np.random.choice(np.arange(n_factors)[~self.categorical], 1)[0] # don't change categorical factors
x = np.arange(self.factor_sizes[ind])
p = laplace.pdf(x, loc=start[ind],
scale=np.log(self.factor_sizes[ind]) / rate)
p[x == start[ind]] = 0
p /= np.sum(p)
end[ind] = np.random.choice(x, 1, p=p)
assert np.sum(abs(start - end)) > 0
return end
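# Usage sketch of the pair sampling above (an assumed example, not from the original file):
# loader = Cars3D(prior='laplace', rate=5)  # non-categorical factors move by small, Laplace-distributed steps
# x1, x2, f1, f2 = loader[0]                # two images plus their two factor vectors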
class IndexManger(object):
"""Index mapping from features to positions of state space atoms."""
def __init__(self, factor_sizes):
"""Index to latent (= features) space and vice versa.
Args:
factor_sizes: List of integers with the number of distinct values for each
of the factors.
"""
self.factor_sizes = np.array(factor_sizes)
self.num_total = np.prod(self.factor_sizes)
self.factor_bases = self.num_total / np.cumprod(self.factor_sizes)
self.index_to_feat = cartesian([np.array(list(range(i))) for i in self.factor_sizes])
def features_to_index(self, features):
"""Returns the indices in the input space for given factor configurations.
Args:
features: Numpy matrix where each row contains a different factor
configuration for which the indices in the input space should be
returned.
"""
assert np.all((0 <= features) & (features <= self.factor_sizes))
index = np.array(np.dot(features, self.factor_bases), dtype=np.int64)
assert np.all((0 <= index) & (index < self.num_total))
return index
def index_to_features(self, index):
assert np.all((0 <= index) & (index < self.num_total))
features = self.index_to_feat[index]
assert np.all((0 <= features) & (features <= self.factor_sizes))
return features
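# Worked example of the mixed-radix mapping above (values chosen for illustration):
# with factor_sizes = [4, 24, 183], factor_bases = [24 * 183, 183, 1] = [4392, 183, 1],
# so features_to_index([1, 2, 5]) = 1 * 4392 + 2 * 183 + 5 = 4763,
# and index_to_features(4763) recovers [1, 2, 5] from the precomputed cartesian table.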
class Cars3D(TupleLoader):
fname = 'nips2015-analogy-data.tar.gz'
url = 'http://www.scottreed.info/files/nips2015-analogy-data.tar.gz'
"""
[4, 24, 183]
0. phi altitude viewpoint
1. theta azimuth viewpoint
2. car type
"""
def __init__(self, path='./data/cars/', data=None, **tupel_loader_kwargs):
super().__init__(**tupel_loader_kwargs)
self.factor_sizes = [4, 24, 183]
self.num_factors = len(self.factor_sizes)
self.categorical = np.array([False, False, True])
self.data_shape = [64, 64, 3]
self.index_manager = IndexManger(self.factor_sizes)
# download automatically if not exists
if not os.path.exists(path):
self.download_data(path)
if data is None:
all_files = glob.glob(path + '/*.mat')
self.data = np.moveaxis(self._load_data(all_files).astype(np.float32), 3, 1)
else: # speedup for debugging
self.data = data
def _load_data(self, all_files):
def _load_mesh(filename):
"""Parses a single source file and rescales contained images."""
with open(os.path.join(filename), "rb") as f:
mesh = np.einsum("abcde->deabc", sio.loadmat(f)["im"])
flattened_mesh = mesh.reshape((-1,) + mesh.shape[2:])
rescaled_mesh = np.zeros((flattened_mesh.shape[0], 64, 64, 3))
for i in range(flattened_mesh.shape[0]):
pic = Image.fromarray(flattened_mesh[i, :, :, :])
pic.thumbnail((64, 64), Image.ANTIALIAS)
rescaled_mesh[i, :, :, :] = np.array(pic)
return rescaled_mesh * 1. / 255
dataset = np.zeros((24 * 4 * 183, 64, 64, 3))
for i, filename in enumerate(all_files):
data_mesh = _load_mesh(filename)
factor1 = np.array(list(range(4)))
factor2 = np.array(list(range(24)))
all_factors = np.transpose([np.tile(factor1, len(factor2)),
np.repeat(factor2, len(factor1)),
np.tile(i, len(factor1) * len(factor2))])
indexes = self.index_manager.features_to_index(all_factors)
dataset[indexes] = data_mesh
return dataset
def download_data(self, load_path='./data/cars/'):
os.makedirs(load_path, exist_ok=True)
print('downloading data may take a couple of seconds, total ~ 300MB')
request.urlretrieve(self.url, os.path.join(load_path, self.fname))
print('extracting data, do NOT interrupt')
tar = tarfile.open(os.path.join(load_path, self.fname), "r:gz")
tar.extractall()
tar.close()
print('saved data at', load_path)
class SmallNORB(TupleLoader):
"""`MNIST <https://cs.nyu.edu/~ylclab/data/norb-v1.0-small//>`_ Dataset.
factors:
[5, 10, 9, 18, 6]
- 0. the category (0 to 4): 0 for animal, 1 for human, 2 for plane, 3 for truck, 4 for car
- 1. the instance in the category (0 to 9)
- 2. the elevation (0 to 8, which means the cameras are 30, 35, 40, 45, 50, 55, 60, 65, 70 degrees from the horizontal, respectively)
- 3. the azimuth (0,2,4,...,34, multiply by 10 to get the azimuth in degrees)
- 4. the lighting condition (0 to 5)
"""
dataset_root = "https://cs.nyu.edu/~ylclab/data/norb-v1.0-small/"
data_files = {
'train': {
'dat': {
"name": 'smallnorb-5x46789x9x18x6x2x96x96-training-dat.mat',
"md5_gz": "66054832f9accfe74a0f4c36a75bc0a2",
"md5": "8138a0902307b32dfa0025a36dfa45ec"
},
'info': {
"name": 'smallnorb-5x46789x9x18x6x2x96x96-training-info.mat',
"md5_gz": "51dee1210a742582ff607dfd94e332e3",
"md5": "19faee774120001fc7e17980d6960451"
},
'cat': {
"name": 'smallnorb-5x46789x9x18x6x2x96x96-training-cat.mat',
"md5_gz": "23c8b86101fbf0904a000b43d3ed2fd9",
"md5": "fd5120d3f770ad57ebe620eb61a0b633"
},
},
'test': {
'dat': {
"name": 'smallnorb-5x01235x9x18x6x2x96x96-testing-dat.mat',
"md5_gz": "e4ad715691ed5a3a5f138751a4ceb071",
"md5": "e9920b7f7b2869a8f1a12e945b2c166c"
},
'info': {
"name": 'smallnorb-5x01235x9x18x6x2x96x96-testing-info.mat',
"md5_gz": "a9454f3864d7fd4bb3ea7fc3eb84924e",
"md5": "7c5b871cc69dcadec1bf6a18141f5edc"
},
'cat': {
"name": 'smallnorb-5x01235x9x18x6x2x96x96-testing-cat.mat',
"md5_gz": "5aa791cd7e6016cf957ce9bdb93b8603",
"md5": "fd5120d3f770ad57ebe620eb61a0b633"
},
},
}
raw_folder = 'raw'
processed_folder = 'processed'
train_image_file = 'train_img'
train_label_file = 'train_label'
train_info_file = 'train_info'
test_image_file = 'test_img'
test_label_file = 'test_label'
test_info_file = 'test_info'
extension = '.pt'
def __init__(self, path='./data/smallNORB/', download=True,
mode="all",
transform=None,
evaluate=False,
**tupel_loader_kwargs):
super().__init__(**tupel_loader_kwargs)
self.root = os.path.expanduser(path)
self.mode = mode
self.evaluate = evaluate
self.factor_sizes = [5, 10, 9, 18, 6]
self.latent_factor_indices = [0, 2, 3, 4]
self.num_factors = len(self.latent_factor_indices)
self.categorical = np.array([True, True, False, False, False])
self.index_manager = IndexManger(self.factor_sizes)
if transform:
self.transform = transform
else:
self.transform = transforms.Compose([
transforms.ToPILImage(),
transforms.Resize((64, 64), interpolation=2),
transforms.ToTensor(),
lambda x: x.numpy()])
if download:
self.download()
if not self._check_exists():
raise RuntimeError('Dataset not found or corrupted.' +
' You can use download=True to download it')
# load labels
labels_train = self._load(self.train_label_file)
labels_test = self._load(self.test_label_file)
# load info files
infos_train = self._load(self.train_info_file)
infos_test = self._load(self.test_info_file)
# load right set
data_train = self._load("{}_left".format(self.train_image_file))
data_test = self._load("{}_left".format(self.test_image_file))
info_train = torch.cat([labels_train[:, None], infos_train], dim=1)
info_test = torch.cat([labels_test[:, None], infos_test], dim=1)
infos = torch.cat([info_train, info_test])
data = torch.cat([data_train, data_test])
sorted_inds = np.lexsort([infos[:, i] for i in range(4, -1, -1)])
self.infos = infos[sorted_inds]
self.data = data[sorted_inds].numpy() # is uint8
def sample_factors(self, num, random_state):
# override super to ignore instance (see https://github.com/google-research/disentanglement_lib/blob/86a644d4ed35c771560dc3360756363d35477357/disentanglement_lib/data/ground_truth/norb.py#L52)
factors = super().sample_factors(num, random_state)
if self.evaluate:
factors = np.concatenate([factors[:, :1], factors[:, 2:]], 1)
return factors
def sample_observations_from_factors(self, factors, random_state):
# override super to ignore instance (see https://github.com/google-research/disentanglement_lib/blob/86a644d4ed35c771560dc3360756363d35477357/disentanglement_lib/data/ground_truth/norb.py#L52)
if self.evaluate:
instances = random_state.randint(0, self.factor_sizes[1], factors[:, :1].shape)
factors = np.concatenate([factors[:, :1], instances, factors[:, 1:]], 1)
return super().sample_observations_from_factors(factors, random_state)
def __len__(self):
return len(self.data)
def _transform(self, img):
# doing this so that it is consistent with all other data sets
# to return a PIL Image
img = Image.fromarray(img.numpy(), mode='L')
if self.transform is not None:
img = self.transform(img)
return img
def _load(self, file_name):
return torch.load(os.path.join(self.root, self.processed_folder, file_name + self.extension))
def _save(self, file, file_name):
with open(os.path.join(self.root, self.processed_folder, file_name + self.extension), 'wb') as f:
torch.save(file, f)
def _check_exists(self):
""" Check if processed files exists."""
files = (
"{}_left".format(self.train_image_file),
"{}_right".format(self.train_image_file),
"{}_left".format(self.test_image_file),
"{}_right".format(self.test_image_file),
self.test_label_file,
self.train_label_file
)
fpaths = [os.path.exists(os.path.join(self.root, self.processed_folder, f + self.extension)) for f in files]
return False not in fpaths
def _flat_data_files(self):
return [j for i in self.data_files.values() for j in list(i.values())]
def _check_integrity(self):
"""Check if unpacked files have correct md5 sum."""
root = self.root
for file_dict in self._flat_data_files():
filename = file_dict["name"]
md5 = file_dict["md5"]
fpath = os.path.join(root, self.raw_folder, filename)
if not check_integrity(fpath, md5):
return False
return True
def download(self):
"""Download the SmallNORB data if it doesn't exist in processed_folder already."""
import gzip
if self._check_exists():
return
# check if already extracted and verified
if self._check_integrity():
print('Files already downloaded and verified')
else:
# download and extract
for file_dict in self._flat_data_files():
url = self.dataset_root + file_dict["name"] + '.gz'
filename = file_dict["name"]
gz_filename = filename + '.gz'
md5 = file_dict["md5_gz"]
fpath = os.path.join(self.root, self.raw_folder, filename)
gz_fpath = fpath + '.gz'
# download if compressed file not exists and verified
download_url(url, os.path.join(self.root, self.raw_folder), gz_filename, md5)
print('# Extracting data {}\n'.format(filename))
with open(fpath, 'wb') as out_f, \
gzip.GzipFile(gz_fpath) as zip_f:
out_f.write(zip_f.read())
os.unlink(gz_fpath)
# process and save as torch files
print('Processing...')
# create processed folder
try:
os.makedirs(os.path.join(self.root, self.processed_folder))
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
# read train files
left_train_img, right_train_img = self._read_image_file(self.data_files["train"]["dat"]["name"])
train_info = self._read_info_file(self.data_files["train"]["info"]["name"])
train_label = self._read_label_file(self.data_files["train"]["cat"]["name"])
# read test files
left_test_img, right_test_img = self._read_image_file(self.data_files["test"]["dat"]["name"])
test_info = self._read_info_file(self.data_files["test"]["info"]["name"])
test_label = self._read_label_file(self.data_files["test"]["cat"]["name"])
# save training files
self._save(left_train_img, "{}_left".format(self.train_image_file))
self._save(right_train_img, "{}_right".format(self.train_image_file))
self._save(train_label, self.train_label_file)
self._save(train_info, self.train_info_file)
# save test files
self._save(left_test_img, "{}_left".format(self.test_image_file))
self._save(right_test_img, "{}_right".format(self.test_image_file))
self._save(test_label, self.test_label_file)
self._save(test_info, self.test_info_file)
print('Done!')
@staticmethod
def _parse_header(file_pointer):
# Read magic number and ignore
struct.unpack('<BBBB', file_pointer.read(4)) # '<' is little endian)
# Read dimensions
dimensions = []
num_dims, = struct.unpack('<i', file_pointer.read(4)) # '<' is little endian)
for _ in range(num_dims):
dimensions.extend(struct.unpack('<i', file_pointer.read(4)))
return dimensions
def _read_image_file(self, file_name):
fpath = os.path.join(self.root, self.raw_folder, file_name)
with open(fpath, mode='rb') as f:
dimensions = self._parse_header(f)
assert dimensions == [24300, 2, 96, 96]
num_samples, _, height, width = dimensions
left_samples = np.zeros(shape=(num_samples, height, width), dtype=np.uint8)
right_samples = np.zeros(shape=(num_samples, height, width), dtype=np.uint8)
for i in range(num_samples):
# left and right images stored in pairs, left first
left_samples[i, :, :] = self._read_image(f, height, width)
right_samples[i, :, :] = self._read_image(f, height, width)
return torch.ByteTensor(left_samples), torch.ByteTensor(right_samples)
@staticmethod
def _read_image(file_pointer, height, width):
"""Read raw image data and restore shape as appropriate. """
image = struct.unpack('<' + height * width * 'B', file_pointer.read(height * width))
image = np.uint8(np.reshape(image, newshape=(height, width)))
return image
def _read_label_file(self, file_name):
fpath = os.path.join(self.root, self.raw_folder, file_name)
with open(fpath, mode='rb') as f:
dimensions = self._parse_header(f)
assert dimensions == [24300]
num_samples = dimensions[0]
struct.unpack('<BBBB', f.read(4)) # ignore this integer
struct.unpack('<BBBB', f.read(4)) # ignore this integer
labels = np.zeros(shape=num_samples, dtype=np.int32)
for i in range(num_samples):
category, = struct.unpack('<i', f.read(4))
labels[i] = category
return torch.LongTensor(labels)
def _read_info_file(self, file_name):
fpath = os.path.join(self.root, self.raw_folder, file_name)
with open(fpath, mode='rb') as f:
dimensions = self._parse_header(f)
assert dimensions == [24300, 4]
num_samples, num_info = dimensions
struct.unpack('<BBBB', f.read(4)) # ignore this integer
infos = np.zeros(shape=(num_samples, num_info), dtype=np.int32)
for r in range(num_samples):
for c in range(num_info):
info, = struct.unpack('<i', f.read(4))
infos[r, c] = info
return torch.LongTensor(infos)
class Shapes3D(TupleLoader):
"""Shapes3D dataset.
self.factor_sizes = [10, 10, 10, 8, 4, 15]
The data set was originally introduced in "Disentangling by Factorising".
The ground-truth factors of variation are:
0 - floor color (10 different values)
1 - wall color (10 different values)
2 - object color (10 different values)
3 - object size (8 different values)
4 - object type (4 different values)
5 - azimuth (15 different values)
"""
#url = 'https://liquidtelecom.dl.sourceforge.net/project/shapes3d/Shapes3D.zip'
#fname = 'shapes3d.pkl'
url = 'https://storage.googleapis.com/3d-shapes/3dshapes.h5'
fname = '3dshapes.h5'
def __init__(self, path='./data/shapes3d/', data=None, **tupel_loader_kwargs):
super().__init__(**tupel_loader_kwargs)
self.factor_sizes = [10, 10, 10, 8, 4, 15]
self.num_factors = len(self.factor_sizes)
self.categorical = np.array([False, False, False, False, True, False])
self.index_manager = IndexManger(self.factor_sizes)
self.path = path
if not os.path.exists(self.path):
self.download()
# read dataset
print('initializing Shapes3D dataset (takes a couple of seconds; loads a large data array)')
if data is None:
with h5py.File(os.path.join(self.path, self.fname), 'r') as dataset:
images = dataset['images'][()]
self.data = np.transpose(images, (0, 3, 1, 2)) # np.uint8
else:
self.data = data
def download(self):
print('downloading shapes3d')
os.makedirs(self.path, exist_ok=True)
request.urlretrieve(self.url, os.path.join(self.path, self.fname))
class SpriteDataset(TupleLoader):
"""
A PyTorch wrapper for the dSprites dataset by
Matthey et al. 2017. The dataset provides a 2D scene
with a sprite under different transformations:
# dim, type, #values avail.-range
* 0, color | 1 | 1-1
* 1, shape | 3 | 1-3
* 2, scale | 6 | 0.5-1.
* 3, orientation | 40 | 0-2pi
* 4, x-position | 32 | 0-1
* 5, y-position | 32 | 0-1
for details see https://github.com/deepmind/dsprites-dataset
"""
def __init__(self, path='./data/dsprites/', **tupel_loader_kwargs):
super().__init__(**tupel_loader_kwargs)
url = "https://github.com/deepmind/dsprites-dataset/raw/master/dsprites_ndarray_co1sh3sc6or40x32y32_64x64.npz"
self.path = path
self.factor_sizes = [3, 6, 40, 32, 32]
self.num_factors = len(self.factor_sizes)
self.categorical = np.array([True, False, False, False, False])
self.index_manager = IndexManger(self.factor_sizes)
try:
self.data = self.load_data()
except FileNotFoundError:
if not os.path.exists(path):
os.makedirs(path, exist_ok=True)
print(
f'downloading dataset ... saving to {os.path.join(path, "dsprites.npz")}')
request.urlretrieve(url, os.path.join(path, 'dsprites.npz'))
self.data = self.load_data()
def __len__(self):
return len(self.data)
def load_data(self):
dataset_zip = np.load(os.path.join(self.path, 'dsprites.npz'),
encoding="latin1", allow_pickle=True)
return dataset_zip["imgs"].squeeze().astype(np.float32)
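# Usage sketch (an assumed example; dsprites.npz is downloaded on first use):
# ds = SpriteDataset(k=1, prior='uniform')
# x1, x2, f1, f2 = ds[0]  # a pair of 64x64 sprites differing in exactly one factor
# batches = DataLoader(ds, batch_size=64, shuffle=True)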
class MPI3DReal(TupleLoader):
"""
object_color white=0, green=1, red=2, blue=3, brown=4, olive=5
object_shape cone=0, cube=1, cylinder=2, hexagonal=3, pyramid=4, sphere=5
object_size small=0, large=1
camera_height top=0, center=1, bottom=2
background_color purple=0, sea green=1, salmon=2
horizontal_axis 0,...,39
vertical_axis 0,...,39
"""
url = 'https://storage.googleapis.com/disentanglement_dataset/Final_Dataset/mpi3d_real.npz'
fname = 'mpi3d_real.npz'
def __init__(self, path='./data/mpi3d_real/', **tupel_loader_kwargs):
super().__init__(**tupel_loader_kwargs)
self.factor_sizes = [6, 6, 2, 3, 3, 40, 40]
self.num_factors = len(self.factor_sizes)
self.categorical = np.array([False, True, False, False, False, False, False])
self.index_manager = IndexManger(self.factor_sizes)
if not os.path.exists(path):
self.download(path)
load_path = os.path.join(path, self.fname)
data = np.load(load_path)['images']
self.data = np.transpose(data.reshape([-1, 64, 64, 3]), (0, 3, 1, 2)) # np.uint8
def download(self, path):
os.makedirs(path, exist_ok=True)
print('downloading')
request.urlretrieve(self.url, os.path.join(path, self.fname))
print('download complete')
def value_to_key(x, val):
for k in x.keys():
if x[k] == val:
return k
def rgb(c):
return tuple((255 * np.array(c)).astype(np.uint8))
class NaturalSprites(Dataset):
def __init__(self, natural_discrete=False, path='./data/natural_sprites/'):
self.natural_discrete = natural_discrete
self.sequence_len = 2 #only consider pairs
self.area_filter = 0.1 #filter out 10% of outliers
self.path = path
self.fname = 'downscale_keepaspect.csv'
self.url = 'https://zenodo.org/record/3948069/files/downscale_keepaspect.csv?download=1'
self.load_data()
def load_data(self):
# download if not avaiable
file_path = os.path.join(self.path, self.fname)
if not os.path.exists(file_path):
os.makedirs(self.path, exist_ok=True)
print(f'file not found, downloading from {self.url} ...')
from urllib import request
url = self.url
request.urlretrieve(url, file_path)
with open(file_path) as data:
self.csv_dict = load_csv(data, sequence=self.sequence_len)
self.orig_num = [32, 32, 6, 40, 4, 1, 1, 1]
self.dsprites = {'x': np.linspace(0.2,0.8,self.orig_num[0]),
'y': np.linspace(0.2,0.8,self.orig_num[1]),
'scale': np.linspace(0,0.5,self.orig_num[2]+1)[1:],
'angle': np.linspace(0,360,self.orig_num[3],dtype=np.int,endpoint=False),
'shape': ['square', 'triangle', 'star_4', 'spoke_4'],
'c0': [1.], 'c1': [1.], 'c2': [1.]}
distributions = []
for key in self.dsprites.keys():
distributions.append(distribs.Discrete(key, self.dsprites[key]))
self.factor_dist = distribs.Product(distributions)
self.renderer = spriteworld_renderers.PILRenderer(image_size=(64, 64), anti_aliasing=5,
color_to_rgb=rgb)
if self.area_filter:
keep_idxes = []
print(len(self.csv_dict['x']))
for i in range(self.sequence_len):
x = pd.Series(np.array(self.csv_dict['area'])[:,i])
keep_idxes.append(x.between(x.quantile(self.area_filter/2), x.quantile(1-(self.area_filter/2))))
for k in self.csv_dict.keys():
y = | pd.Series(self.csv_dict[k]) | pandas.Series |
from pathlib import Path
from multiprocessing import Pool
from opencadd.databases.klifs import setup_remote
import pandas as pd
CACHE_DIR = Path("../data/.cache")
CACHE_DIR.mkdir(parents=True, exist_ok=True)
def read_klifs_ligand(structure_id: int, directory):
"""Retrieve and read an orthosteric kinase ligand from KLIFS."""
from pathlib import Path
from kinoml.modeling.OEModeling import read_molecules
from kinoml.utils import LocalFileStorage
file_path = LocalFileStorage.klifs_ligand_mol2(structure_id, Path(directory))
if not file_path.is_file():
from opencadd.databases.klifs import setup_remote
remote = setup_remote()
try:
mol2_text = remote.coordinates.to_text(structure_id, entity="ligand", extension="mol2")
except ValueError:
print(f"Unable to fetch ligand coordinates of structure with KLIFS ID {structure_id}.")
return None
with open(file_path, "w") as wf:
wf.write(mol2_text)
molecule = read_molecules(file_path)[0]
return molecule
def count_ligands(pdb_id, chain_id, expo_id, directory):
"""Count the ligands in the given PDB entry."""
from openeye import oechem
from kinoml.modeling.OEModeling import read_molecules, select_chain, remove_non_protein
from kinoml.databases.pdb import download_pdb_structure
structure_path = download_pdb_structure(pdb_id, directory)
if structure_path:  # check that the download actually succeeded
structure = read_molecules(structure_path)[0]
else:
raise ValueError(f"Could not download PDB entry {pdb_id}!")
structure = select_chain(structure, chain_id)
structure = remove_non_protein(structure, exceptions=[expo_id])
hierview = oechem.OEHierView(structure)
count = sum([1 for residue in hierview.GetResidues() if residue.GetResidueName() == expo_id])
return count
def get_docking_template(structure, docking_templates, cache_dir):
"""Get most similar docking template in the same conformation excluding itself."""
from kinoml.modeling.OEModeling import (
read_smiles,
generate_reasonable_conformations,
overlay_molecules
)
# filter for conformation
docking_templates = docking_templates[
docking_templates["structure.dfg"] == structure["structure.dfg"]
]
docking_templates = docking_templates[
docking_templates["structure.ac_helix"] == structure["structure.ac_helix"]
]
# remove itself
docking_templates = docking_templates[
docking_templates["structure.pdb_id"] != structure["structure.pdb_id"]
]
# find entry with most similar ligand
complex_ligands = [
read_klifs_ligand(structure_id, cache_dir) for structure_id
in docking_templates["structure.klifs_id"]
]
complex_ligands = [complex_ligand for complex_ligand in complex_ligands if complex_ligand]
conformations_ensemble = generate_reasonable_conformations(read_smiles(structure["smiles"]))
overlay_scores = []
for conformations in conformations_ensemble:
overlay_scores += [
[i, overlay_molecules(complex_ligand, conformations)[0]]
for i, complex_ligand in enumerate(complex_ligands)
]
docking_template_index, docking_template_similarity = sorted(
overlay_scores, key=lambda x: x[1], reverse=True
)[0]
docking_template = docking_templates.iloc[docking_template_index]
# return dictionary with key pdb_id_expo_id and dictionary as value with docking template
# pdb_id, expo_id and chain_id
docking_template_dict = {
f"{structure['structure.pdb_id']}_{structure['ligand.expo_id']}" : {
"docking_template_pdb_id": docking_template["structure.pdb_id"],
"docking_template_chain_id": docking_template["structure.chain"],
"docking_template_expo_id": docking_template["ligand.expo_id"],
"docking_template_similarity": docking_template_similarity,
}
}
return docking_template_dict
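# Shape of the mapping returned above (IDs and score are hypothetical, for illustration only):
# {"3poz_03P": {"docking_template_pdb_id": "1m17",
#               "docking_template_chain_id": "A",
#               "docking_template_expo_id": "AQ4",
#               "docking_template_similarity": 1.4}}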
if __name__ == "__main__":
print("Reading benchmark dataframe ...")
structures = pd.read_csv("../data/docking_benchmark_dataset.csv")
print("Retrieving available docking templates ...")
remote = setup_remote()
docking_templates = remote.structures.all_structures()
print("Filtering available docking templates")
# orthosteric ligand
docking_templates = docking_templates[
docking_templates["ligand.expo_id"] != "-"
]
# single orthosteric ligand
docking_templates = docking_templates.groupby("structure.pdb_id").filter(
lambda x: len(set(x["ligand.expo_id"])) == 1
)
# remove structures with ligands not handled by oespruce
docking_templates = docking_templates[docking_templates["ligand.expo_id"] != "A"]
# sort by quality
docking_templates = docking_templates.sort_values(
by=[
"structure.qualityscore", "structure.resolution", "structure.chain",
"structure.alternate_model"
],
ascending=[False, True, True, True]
)
# keep highest quality structure per PDB ID
docking_templates = docking_templates.groupby("structure.pdb_id").head(1)
# remove structures with multiple instances of the same ligand
multiple_ligands_indices = []
erroneous_indices = []
for index, structure in docking_templates.iterrows():
try:
if count_ligands(structure["structure.pdb_id"], structure["structure.chain"],
structure["ligand.expo_id"], CACHE_DIR) > 1:
multiple_ligands_indices.append(index)
except ValueError:
print("Error counting ligands:")
print(
structure["structure.pdb_id"],
structure["structure.chain"],
structure["ligand.expo_id"]
)
erroneous_indices.append(index)
docking_templates = docking_templates[~docking_templates.index.isin(multiple_ligands_indices)]
docking_templates = docking_templates[~docking_templates.index.isin(erroneous_indices)]
print("Downloading ligand structures of filtered docking templates ...")
unavailable_ligand_indices = []
for index, docking_template in docking_templates.iterrows():
if not read_klifs_ligand(docking_template["structure.klifs_id"], CACHE_DIR):
unavailable_ligand_indices.append(index)
docking_templates = docking_templates[~docking_templates.index.isin(unavailable_ligand_indices)]
print("Getting docking templates ...")
with Pool(processes=50) as pool:
results = pool.starmap(
get_docking_template,
[(structure, docking_templates, CACHE_DIR) for i, structure in structures.iterrows()]
)
print("Merging and saving results ...")
results_merged = {}
for result in results:
results_merged.update(result)
results_merged = | pd.DataFrame.from_dict(results_merged, orient="index") | pandas.DataFrame.from_dict |
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder, MinMaxScaler, OrdinalEncoder
from sklearn.ensemble import GradientBoostingClassifier
from imblearn.over_sampling import SMOTE
#from secret import access_key, secret_access_key
import joblib
import streamlit as st
import boto3
import tempfile
import json
import requests
from streamlit_lottie import st_lottie_spinner
train_original = pd.read_csv('https://raw.githubusercontent.com/semasuka/Credit-card-approval-prediction-classification/main/datasets/train.csv')
test_original = pd.read_csv('https://raw.githubusercontent.com/semasuka/Credit-card-approval-prediction-classification/main/datasets/test.csv')
full_data = pd.concat([train_original, test_original], axis=0)
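# Shuffle the combined rows before re-splitting so the original train/test ordering does not carry over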
full_data = full_data.sample(frac=1).reset_index(drop=True)
def data_split(df, test_size):
train_df, test_df = train_test_split(df, test_size=test_size, random_state=42)
return train_df.reset_index(drop=True), test_df.reset_index(drop=True)
train_original, test_original = data_split(full_data, 0.2)
train_copy = train_original.copy()
test_copy = test_original.copy()
def value_cnt_norm_cal(df,feature):
'''
Function to calculate the count of each value in a feature and normalize it
'''
ftr_value_cnt = df[feature].value_counts()
ftr_value_cnt_norm = df[feature].value_counts(normalize=True) * 100
ftr_value_cnt_concat = pd.concat([ftr_value_cnt, ftr_value_cnt_norm], axis=1)
ftr_value_cnt_concat.columns = ['Count', 'Frequency (%)']
return ftr_value_cnt_concat
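# The transformer classes below follow the scikit-learn BaseEstimator/TransformerMixin API,
# so they can be chained in a preprocessing Pipeline, e.g. (illustrative sketch only):
# Pipeline([('outliers', OutlierRemover()), ('drop', DropFeatures()), ('scale', MinMaxWithFeatNames())])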
class OutlierRemover(BaseEstimator, TransformerMixin):
def __init__(self,feat_with_outliers = ['Family member count','Income', 'Employment length']):
self.feat_with_outliers = feat_with_outliers
def fit(self,df):
return self
def transform(self,df):
if (set(self.feat_with_outliers).issubset(df.columns)):
# 25% quantile
Q1 = df[self.feat_with_outliers].quantile(.25)
# 75% quantile
Q3 = df[self.feat_with_outliers].quantile(.75)
IQR = Q3 - Q1
            # keep only the rows within 3 IQR of the quartiles
df = df[~((df[self.feat_with_outliers] < (Q1 - 3 * IQR)) |(df[self.feat_with_outliers] > (Q3 + 3 * IQR))).any(axis=1)]
return df
else:
print("One or more features are not in the dataframe")
return df
class DropFeatures(BaseEstimator,TransformerMixin):
def __init__(self,feature_to_drop = ['Has a mobile phone','Children count','Job title','Account age']):
self.feature_to_drop = feature_to_drop
def fit(self,df):
return self
def transform(self,df):
if (set(self.feature_to_drop).issubset(df.columns)):
df.drop(self.feature_to_drop,axis=1,inplace=True)
return df
else:
print("One or more features are not in the dataframe")
return df
class TimeConversionHandler(BaseEstimator, TransformerMixin):
def __init__(self, feat_with_days = ['Employment length', 'Age']):
self.feat_with_days = feat_with_days
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
if (set(self.feat_with_days).issubset(X.columns)):
# convert days to absolute value
X[['Employment length','Age']] = np.abs(X[['Employment length','Age']])
return X
else:
print("One or more features are not in the dataframe")
return X
class RetireeHandler(BaseEstimator, TransformerMixin):
def __init__(self):
pass
def fit(self, df):
return self
def transform(self, df):
if 'Employment length' in df.columns:
            # select rows where employment length is 365243, which corresponds to retirees
df_ret_idx = df['Employment length'][df['Employment length'] == 365243].index
# change 365243 to 0
df.loc[df_ret_idx,'Employment length'] = 0
return df
else:
print("Employment length is not in the dataframe")
return df
class SkewnessHandler(BaseEstimator, TransformerMixin):
def __init__(self,feat_with_skewness=['Income','Age']):
self.feat_with_skewness = feat_with_skewness
def fit(self,df):
return self
def transform(self,df):
if (set(self.feat_with_skewness).issubset(df.columns)):
# Handle skewness with cubic root transformation
df[self.feat_with_skewness] = np.cbrt(df[self.feat_with_skewness])
return df
else:
print("One or more features are not in the dataframe")
return df
class BinningNumToYN(BaseEstimator, TransformerMixin):
def __init__(self,feat_with_num_enc=['Has a work phone','Has a phone','Has an email']):
self.feat_with_num_enc = feat_with_num_enc
def fit(self,df):
return self
def transform(self,df):
if (set(self.feat_with_num_enc).issubset(df.columns)):
# Change 0 to N and 1 to Y for all the features in feat_with_num_enc
for ft in self.feat_with_num_enc:
df[ft] = df[ft].map({1:'Y',0:'N'})
return df
else:
print("One or more features are not in the dataframe")
return df
class OneHotWithFeatNames(BaseEstimator,TransformerMixin):
def __init__(self,one_hot_enc_ft = ['Gender', 'Marital status', 'Dwelling', 'Employment status', 'Has a car', 'Has a property', 'Has a work phone', 'Has a phone', 'Has an email']):
self.one_hot_enc_ft = one_hot_enc_ft
def fit(self,df):
return self
def transform(self,df):
if (set(self.one_hot_enc_ft).issubset(df.columns)):
# function to one hot encode the features in one_hot_enc_ft
def one_hot_enc(df,one_hot_enc_ft):
one_hot_enc = OneHotEncoder()
one_hot_enc.fit(df[one_hot_enc_ft])
# get the result of the one hot encoding columns names
feat_names_one_hot_enc = one_hot_enc.get_feature_names_out(one_hot_enc_ft)
# change the array of the one hot encoding to a dataframe with the column names
df = pd.DataFrame(one_hot_enc.transform(df[self.one_hot_enc_ft]).toarray(),columns=feat_names_one_hot_enc,index=df.index)
return df
            # function to concatenate the one hot encoded features with the rest of the features that were not encoded
def concat_with_rest(df,one_hot_enc_df,one_hot_enc_ft):
# get the rest of the features
rest_of_features = [ft for ft in df.columns if ft not in one_hot_enc_ft]
# concatenate the rest of the features with the one hot encoded features
df_concat = pd.concat([one_hot_enc_df, df[rest_of_features]],axis=1)
return df_concat
# one hot encoded dataframe
one_hot_enc_df = one_hot_enc(df,self.one_hot_enc_ft)
# returns the concatenated dataframe
full_df_one_hot_enc = concat_with_rest(df,one_hot_enc_df,self.one_hot_enc_ft)
print(full_df_one_hot_enc.tail(25))
return full_df_one_hot_enc
else:
print("One or more features are not in the dataframe")
return df
class OrdinalFeatNames(BaseEstimator,TransformerMixin):
def __init__(self,ordinal_enc_ft = ['Education level']):
self.ordinal_enc_ft = ordinal_enc_ft
def fit(self,df):
return self
def transform(self,df):
if 'Education level' in df.columns:
ordinal_enc = OrdinalEncoder()
df[self.ordinal_enc_ft] = ordinal_enc.fit_transform(df[self.ordinal_enc_ft])
return df
else:
print("Education level is not in the dataframe")
return df
class MinMaxWithFeatNames(BaseEstimator,TransformerMixin):
def __init__(self,min_max_scaler_ft = ['Age', 'Income', 'Employment length']):
self.min_max_scaler_ft = min_max_scaler_ft
def fit(self,df):
return self
def transform(self,df):
if (set(self.min_max_scaler_ft).issubset(df.columns)):
min_max_enc = MinMaxScaler()
df[self.min_max_scaler_ft] = min_max_enc.fit_transform(df[self.min_max_scaler_ft])
return df
else:
print("One or more features are not in the dataframe")
return df
class ChangeToNumTarget(BaseEstimator,TransformerMixin):
def __init__(self):
pass
def fit(self,df):
return self
def transform(self,df):
if 'Is high risk' in df.columns:
df['Is high risk'] = pd.to_numeric(df['Is high risk'])
return df
else:
print("Is high risk is not in the dataframe")
return df
class OversampleSMOTE(BaseEstimator,TransformerMixin):
def __init__(self):
pass
def fit(self,df):
return self
def transform(self,df):
if 'Is high risk' in df.columns:
            # use SMOTE to oversample the minority class and fix the class imbalance
smote = SMOTE()
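            # fit_resample treats every column except the last as a feature and the last column as the target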
X_bal, y_bal = smote.fit_resample(df.iloc[:,:-1],df.iloc[:,-1])
X_y_bal = pd.concat([pd.DataFrame(X_bal), | pd.DataFrame(y_bal) | pandas.DataFrame |
import bz2
from collections import Counter
from contextlib import contextmanager
from datetime import datetime
from functools import wraps
import gzip
import operator
import os
import re
import string
from typing import (
Any,
Callable,
ContextManager,
List,
Optional,
Sequence,
Tuple,
Type,
Union,
cast,
)
import warnings
import zipfile
import numpy as np
from pandas._config.localization import ( # noqa:F401
can_set_locale,
get_locales,
set_locale,
)
from pandas._typing import Dtype, FilePathOrBuffer, FrameOrSeries
from pandas.compat import get_lzma_file, import_lzma
from pandas.core.dtypes.common import (
is_datetime64_dtype,
is_datetime64tz_dtype,
is_period_dtype,
is_sequence,
is_timedelta64_dtype,
)
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
IntervalIndex,
MultiIndex,
RangeIndex,
Series,
bdate_range,
)
from pandas._testing.asserters import ( # noqa:F401
assert_almost_equal,
assert_attr_equal,
assert_categorical_equal,
assert_class_equal,
assert_contains_all,
assert_copy,
assert_datetime_array_equal,
assert_dict_equal,
assert_equal,
assert_extension_array_equal,
assert_frame_equal,
assert_index_equal,
assert_interval_array_equal,
assert_is_sorted,
assert_is_valid_plot_return_object,
assert_numpy_array_equal,
assert_period_array_equal,
assert_series_equal,
assert_sp_array_equal,
assert_timedelta_array_equal,
raise_assert_detail,
)
from pandas._testing.contexts import ( # noqa:F401
decompress_file,
ensure_clean,
ensure_clean_dir,
ensure_safe_environment_variables,
set_timezone,
use_numexpr,
with_csv_dialect,
)
from pandas.core.arrays import DatetimeArray, PeriodArray, TimedeltaArray, period_array
from pandas.io.common import urlopen
lzma = import_lzma()
_N = 30
_K = 4
_RAISE_NETWORK_ERROR_DEFAULT = False
UNSIGNED_INT_DTYPES: List[Dtype] = ["uint8", "uint16", "uint32", "uint64"]
UNSIGNED_EA_INT_DTYPES: List[Dtype] = ["UInt8", "UInt16", "UInt32", "UInt64"]
SIGNED_INT_DTYPES: List[Dtype] = [int, "int8", "int16", "int32", "int64"]
SIGNED_EA_INT_DTYPES: List[Dtype] = ["Int8", "Int16", "Int32", "Int64"]
ALL_INT_DTYPES = UNSIGNED_INT_DTYPES + SIGNED_INT_DTYPES
ALL_EA_INT_DTYPES = UNSIGNED_EA_INT_DTYPES + SIGNED_EA_INT_DTYPES
FLOAT_DTYPES: List[Dtype] = [float, "float32", "float64"]
FLOAT_EA_DTYPES: List[Dtype] = ["Float32", "Float64"]
COMPLEX_DTYPES: List[Dtype] = [complex, "complex64", "complex128"]
STRING_DTYPES: List[Dtype] = [str, "str", "U"]
DATETIME64_DTYPES: List[Dtype] = ["datetime64[ns]", "M8[ns]"]
TIMEDELTA64_DTYPES: List[Dtype] = ["timedelta64[ns]", "m8[ns]"]
BOOL_DTYPES = [bool, "bool"]
BYTES_DTYPES = [bytes, "bytes"]
OBJECT_DTYPES = [object, "object"]
ALL_REAL_DTYPES = FLOAT_DTYPES + ALL_INT_DTYPES
ALL_NUMPY_DTYPES = (
ALL_REAL_DTYPES
+ COMPLEX_DTYPES
+ STRING_DTYPES
+ DATETIME64_DTYPES
+ TIMEDELTA64_DTYPES
+ BOOL_DTYPES
+ OBJECT_DTYPES
+ BYTES_DTYPES
)
NULL_OBJECTS = [None, np.nan, pd.NaT, float("nan"), pd.NA]
EMPTY_STRING_PATTERN = re.compile("^$")
# set testing_mode
_testing_mode_warnings = (DeprecationWarning, ResourceWarning)
def set_testing_mode():
# set the testing mode filters
testing_mode = os.environ.get("PANDAS_TESTING_MODE", "None")
if "deprecate" in testing_mode:
# pandas\_testing.py:119: error: Argument 2 to "simplefilter" has
# incompatible type "Tuple[Type[DeprecationWarning],
# Type[ResourceWarning]]"; expected "Type[Warning]"
warnings.simplefilter(
"always", _testing_mode_warnings # type: ignore[arg-type]
)
def reset_testing_mode():
# reset the testing mode filters
testing_mode = os.environ.get("PANDAS_TESTING_MODE", "None")
if "deprecate" in testing_mode:
# pandas\_testing.py:126: error: Argument 2 to "simplefilter" has
# incompatible type "Tuple[Type[DeprecationWarning],
# Type[ResourceWarning]]"; expected "Type[Warning]"
warnings.simplefilter(
"ignore", _testing_mode_warnings # type: ignore[arg-type]
)
set_testing_mode()
def reset_display_options():
"""
Reset the display options for printing and representing objects.
"""
pd.reset_option("^display.", silent=True)
def round_trip_pickle(
obj: Any, path: Optional[FilePathOrBuffer] = None
) -> FrameOrSeries:
"""
Pickle an object and then read it again.
Parameters
----------
obj : any object
The object to pickle and then re-read.
path : str, path object or file-like object, default None
The path where the pickled object is written and then read.
Returns
-------
pandas object
The original object that was pickled and then re-read.
"""
_path = path
if _path is None:
_path = f"__{rands(10)}__.pickle"
with ensure_clean(_path) as temp_path:
pd.to_pickle(obj, temp_path)
return pd.read_pickle(temp_path)
def round_trip_pathlib(writer, reader, path: Optional[str] = None):
"""
Write an object to file specified by a pathlib.Path and read it back
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
pandas object
The original object that was serialized and then re-read.
"""
import pytest
Path = pytest.importorskip("pathlib").Path
if path is None:
path = "___pathlib___"
with ensure_clean(path) as path:
writer(Path(path))
obj = reader(Path(path))
return obj
def round_trip_localpath(writer, reader, path: Optional[str] = None):
"""
Write an object to file specified by a py.path LocalPath and read it back.
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
pandas object
The original object that was serialized and then re-read.
"""
import pytest
LocalPath = pytest.importorskip("py.path").local
if path is None:
path = "___localpath___"
with ensure_clean(path) as path:
writer(LocalPath(path))
obj = reader(LocalPath(path))
return obj
def write_to_compressed(compression, path, data, dest="test"):
"""
Write data to a compressed file.
Parameters
----------
compression : {'gzip', 'bz2', 'zip', 'xz'}
The compression type to use.
path : str
The file path to write the data.
data : str
The data to write.
dest : str, default "test"
The destination file (for ZIP only)
Raises
------
ValueError : An invalid compression value was passed in.
"""
args: Tuple[Any, ...] = (data,)
mode = "wb"
method = "write"
compress_method: Callable
if compression == "zip":
compress_method = zipfile.ZipFile
mode = "w"
args = (dest, data)
method = "writestr"
elif compression == "gzip":
compress_method = gzip.GzipFile
elif compression == "bz2":
compress_method = bz2.BZ2File
elif compression == "xz":
compress_method = get_lzma_file(lzma)
else:
raise ValueError(f"Unrecognized compression type: {compression}")
with compress_method(path, mode=mode) as f:
getattr(f, method)(*args)
def randbool(size=(), p: float = 0.5):
return np.random.rand(*size) <= p
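# Character pools for the random-string helpers below; RANDU_CHARS draws from the Hebrew Unicode
# block (code points 1488+) so the generated strings are genuinely non-ASCII.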
RANDS_CHARS = np.array(list(string.ascii_letters + string.digits), dtype=(np.str_, 1))
RANDU_CHARS = np.array(
list("".join(map(chr, range(1488, 1488 + 26))) + string.digits),
dtype=(np.unicode_, 1),
)
def rands_array(nchars, size, dtype="O"):
"""
Generate an array of byte strings.
"""
retval = (
np.random.choice(RANDS_CHARS, size=nchars * np.prod(size))
.view((np.str_, nchars))
.reshape(size)
)
return retval.astype(dtype)
def randu_array(nchars, size, dtype="O"):
"""
Generate an array of unicode strings.
"""
retval = (
np.random.choice(RANDU_CHARS, size=nchars * np.prod(size))
.view((np.unicode_, nchars))
.reshape(size)
)
return retval.astype(dtype)
def rands(nchars):
"""
Generate one random byte string.
See `rands_array` if you want to create an array of random strings.
"""
return "".join(np.random.choice(RANDS_CHARS, nchars))
def close(fignum=None):
from matplotlib.pyplot import close as _close, get_fignums
if fignum is None:
for fignum in get_fignums():
_close(fignum)
else:
_close(fignum)
# -----------------------------------------------------------------------------
# Comparators
def equalContents(arr1, arr2) -> bool:
"""
Checks if the set of unique elements of arr1 and arr2 are equivalent.
"""
return frozenset(arr1) == frozenset(arr2)
def box_expected(expected, box_cls, transpose=True):
"""
Helper function to wrap the expected output of a test in a given box_class.
Parameters
----------
expected : np.ndarray, Index, Series
box_cls : {Index, Series, DataFrame}
Returns
-------
subclass of box_cls
"""
if box_cls is pd.array:
expected = pd.array(expected)
elif box_cls is pd.Index:
expected = pd.Index(expected)
elif box_cls is pd.Series:
expected = pd.Series(expected)
elif box_cls is pd.DataFrame:
expected = pd.Series(expected).to_frame()
if transpose:
# for vector operations, we need a DataFrame to be a single-row,
# not a single-column, in order to operate against non-DataFrame
# vectors of the same length.
expected = expected.T
elif box_cls is PeriodArray:
# the PeriodArray constructor is not as flexible as period_array
expected = period_array(expected)
elif box_cls is DatetimeArray:
expected = DatetimeArray(expected)
elif box_cls is TimedeltaArray:
expected = TimedeltaArray(expected)
elif box_cls is np.ndarray:
expected = np.array(expected)
elif box_cls is to_array:
expected = to_array(expected)
else:
raise NotImplementedError(box_cls)
return expected
def to_array(obj):
# temporary implementation until we get pd.array in place
dtype = getattr(obj, "dtype", None)
if is_period_dtype(dtype):
return period_array(obj)
elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):
return DatetimeArray._from_sequence(obj)
elif is_timedelta64_dtype(dtype):
return TimedeltaArray._from_sequence(obj)
else:
return np.array(obj)
# -----------------------------------------------------------------------------
# Others
def getCols(k):
return string.ascii_uppercase[:k]
# make index
def makeStringIndex(k=10, name=None):
return Index(rands_array(nchars=10, size=k), name=name)
def makeUnicodeIndex(k=10, name=None):
return Index(randu_array(nchars=10, size=k), name=name)
def makeCategoricalIndex(k=10, n=3, name=None, **kwargs):
""" make a length k index or n categories """
x = rands_array(nchars=4, size=n)
return CategoricalIndex(
Categorical.from_codes(np.arange(k) % n, categories=x), name=name, **kwargs
)
def makeIntervalIndex(k=10, name=None, **kwargs):
""" make a length k IntervalIndex """
x = np.linspace(0, 100, num=(k + 1))
return IntervalIndex.from_breaks(x, name=name, **kwargs)
def makeBoolIndex(k=10, name=None):
if k == 1:
return Index([True], name=name)
elif k == 2:
return Index([False, True], name=name)
return Index([False, True] + [False] * (k - 2), name=name)
def makeIntIndex(k=10, name=None):
return Index(list(range(k)), name=name)
def makeUIntIndex(k=10, name=None):
return Index([2 ** 63 + i for i in range(k)], name=name)
def makeRangeIndex(k=10, name=None, **kwargs):
return RangeIndex(0, k, 1, name=name, **kwargs)
def makeFloatIndex(k=10, name=None):
values = sorted(np.random.random_sample(k)) - np.random.random_sample(1)
return Index(values * (10 ** np.random.randint(0, 9)), name=name)
def makeDateIndex(k=10, freq="B", name=None, **kwargs):
dt = datetime(2000, 1, 1)
dr = bdate_range(dt, periods=k, freq=freq, name=name)
return DatetimeIndex(dr, name=name, **kwargs)
def makeTimedeltaIndex(k=10, freq="D", name=None, **kwargs):
return pd.timedelta_range(start="1 day", periods=k, freq=freq, name=name, **kwargs)
def makePeriodIndex(k=10, name=None, **kwargs):
dt = datetime(2000, 1, 1)
return | pd.period_range(start=dt, periods=k, freq="B", name=name, **kwargs) | pandas.period_range |
import pandas as pd
from app.archive_constants import (LABEL)
import warnings
from pandas.core.common import SettingWithCopyWarning
pd.set_option('mode.chained_assignment', None)
warnings.simplefilter(action="ignore", category=SettingWithCopyWarning)
def extract_cell_metdata(df_c_md):
""" Build cell metadata """
df_cell_md = pd.DataFrame()
df_cell_md[LABEL.CELL_ID.value] = [df_c_md[LABEL.CELL_ID.value]]
df_cell_md[LABEL.ANODE.value] = [df_c_md[LABEL.ANODE.value]]
df_cell_md[LABEL.CATHODE.value] = [df_c_md[LABEL.CATHODE.value]]
df_cell_md[LABEL.SOURCE.value] = [df_c_md[LABEL.SOURCE.value]]
df_cell_md[LABEL.AH.value] = [df_c_md[LABEL.AH.value]]
df_cell_md[LABEL.FORM_FACTOR.value] = [df_c_md[LABEL.FORM_FACTOR.value]]
df_cell_md[LABEL.TEST.value] = [df_c_md[LABEL.TEST.value]]
# df_cell_md[LABEL.MAPPING.value] = [df_c_md[LABEL.MAPPING.value]]
df_cell_md[LABEL.TESTER.value] = [df_c_md[LABEL.TESTER.value]]
return df_cell_md
def split_cycle_metadata(df_c_md):
df_cell_md = extract_cell_metdata(df_c_md)
# Build test metadata
df_test_md = pd.DataFrame()
df_test_md[LABEL.CELL_ID.value] = [df_c_md[LABEL.CELL_ID.value]]
df_test_md[LABEL.CRATE_C.value] = [df_c_md[LABEL.CRATE_C.value]]
df_test_md[LABEL.CRATE_D.value] = [df_c_md[LABEL.CRATE_D.value]]
df_test_md[LABEL.SOC_MAX.value] = [df_c_md[LABEL.SOC_MAX.value]]
df_test_md[LABEL.SOC_MIN.value] = [df_c_md[LABEL.SOC_MIN.value]]
df_test_md[LABEL.TEMP.value] = [df_c_md[LABEL.TEMP.value]]
return df_cell_md, df_test_md
def split_abuse_metadata(df_c_md):
df_cell_md = extract_cell_metdata(df_c_md)
# Build test metadata
df_test_md = pd.DataFrame()
df_test_md[LABEL.CELL_ID.value] = [df_c_md[LABEL.CELL_ID.value]]
df_test_md[LABEL.THICKNESS.value] = [df_c_md[LABEL.THICKNESS.value]]
df_test_md[LABEL.V_INIT.value] = [df_c_md[LABEL.V_INIT.value]]
df_test_md[LABEL.INDENTOR.value] = [df_c_md[LABEL.INDENTOR.value]]
df_test_md[LABEL.NAIL_SPEED.value] = [df_c_md[LABEL.NAIL_SPEED.value]]
df_test_md[LABEL.TEMP.value] = [df_c_md[LABEL.TEMP.value]]
return df_cell_md, df_test_md
# sort imported data to ensure cycle index and test times are correctly calculated
def sort_timeseries(df_tmerge):
# Arrange the data by date time first, then by test time
# Rebuild Cycle Index and test time to increment from file to file
    # This method does not depend on data from any specific tester
if not df_tmerge.empty:
df_t = df_tmerge.sort_values(
by=[LABEL.DATE_TIME.value, LABEL.TEST_TIME.value])
df_t = df_t.reset_index(drop=True)
cycles = df_t[[
LABEL.CYCLE_INDEX_FILE.value, LABEL.CYCLE_INDEX.value,
LABEL.FILENAME.value, LABEL.TEST_TIME.value
]].to_numpy()
max_cycle = 1
past_index = 1
max_time = 0
last_file = ""
delta_t = 0
start = 0
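        # Walk the rows in chronological order, offsetting test_time whenever the source file changes so cycle_index and test_time keep increasing across files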
for x in cycles:
if start == 0:
last_file = x[2]
start += 1
if x[2] != last_file:
delta_t = max_time
x[3] += delta_t
last_file = x[2]
else:
x[3] += delta_t
max_time = x[3]
last_file = x[2]
if x[0] < max_cycle:
if past_index == x[0]:
past_index = x[0]
x[1] = max_cycle
else:
past_index = x[0]
x[1] = max_cycle + 1
max_cycle = x[1]
else:
past_index = x[0]
max_cycle = x[0]
x[1] = x[0]
df_tmp = pd.DataFrame(data=cycles[:, [1]],
columns=[LABEL.CYCLE_INDEX.value])
df_t[LABEL.CYCLE_INDEX.value] = df_tmp[LABEL.CYCLE_INDEX.value]
df_tmp = pd.DataFrame(data=cycles[:, [3]],
columns=[LABEL.TEST_TIME.value])
df_t[LABEL.TEST_TIME.value] = pd.to_numeric(
df_tmp[LABEL.TEST_TIME.value])
df_ts = df_t.sort_values(by=[LABEL.TEST_TIME.value])
# Remove quantities only needed to tag files
df_ts.drop(LABEL.FILENAME.value, axis=1, inplace=True)
df_ts.drop(LABEL.CYCLE_INDEX_FILE.value, axis=1, inplace=True)
return df_ts
# calculate statistics for abuse test
def calc_abuse_stats(df_t, df_test_md):
for _ in df_t.index:
df_t[LABEL.NORM_D.value] = df_t.iloc[
0:, df_t.columns.get_loc(LABEL.AXIAL_D.value)] - df_t[
LABEL.AXIAL_D.value][0]
df_t[LABEL.STRAIN.value] = df_t.iloc[
0:, df_t.columns.get_loc(LABEL.NORM_D.value)] / df_test_md[
LABEL.THICKNESS.value]
return df_t
def calc_cycle_stats(df_t):
df_t[LABEL.CYCLE_TIME.value] = 0
no_cycles = int(df_t[LABEL.CYCLE_INDEX.value].max())
# Initialize the cycle_data time frame
a = [0 for _ in range(no_cycles)] # using loops
df_c = pd.DataFrame(data=a, columns=[LABEL.CYCLE_INDEX.value])
df_c[LABEL.CELL_ID.value] = df_t[LABEL.CELL_ID.value]
df_c[LABEL.CYCLE_INDEX.value] = 0
df_c[LABEL.V_MAX.value] = 0
df_c[LABEL.I_MAX.value] = 0
df_c[LABEL.V_MIN.value] = 0
df_c[LABEL.I_MIN.value] = 0
df_c[LABEL.AH_C.value] = 0
df_c[LABEL.AH_D.value] = 0
df_c[LABEL.E_C.value] = 0
df_c[LABEL.E_D.value] = 0
df_c[LABEL.V_C_MEAN.value] = 0
df_c[LABEL.V_D_MEAN.value] = 0
df_c[LABEL.TEST_TIME.value] = 0
df_c[LABEL.AH_EFF.value] = 0
df_c[LABEL.E_EFF.value] = 0
for c_ind in df_c.index:
x = c_ind + 1
df_f = df_t[df_t[LABEL.CYCLE_INDEX.value] == x]
df_f[LABEL.AH_C.value] = 0
df_f[LABEL.E_C.value] = 0
df_f[LABEL.AH_D.value] = 0
df_f[LABEL.E_D.value] = 0
if not df_f.empty:
try:
df_c.iloc[c_ind,
df_c.columns.get_loc(LABEL.CYCLE_INDEX.value)] = x
df_c.iloc[c_ind,
df_c.columns.get_loc(LABEL.V_MAX.value)] = df_f.loc[
df_f[LABEL.V.value].idxmax()].v
df_c.iloc[c_ind,
df_c.columns.get_loc(LABEL.V_MIN.value)] = df_f.loc[
df_f[LABEL.V.value].idxmin()].v
df_c.iloc[c_ind,
df_c.columns.get_loc(LABEL.I_MAX.value)] = df_f.loc[
df_f[LABEL.I.value].idxmax()].i
df_c.iloc[c_ind,
df_c.columns.get_loc(LABEL.I_MIN.value)] = df_f.loc[
df_f[LABEL.I.value].idxmin()].i
df_c.iloc[
c_ind,
df_c.columns.get_loc(LABEL.TEST_TIME.value)] = df_f.loc[
df_f[LABEL.TEST_TIME.value].idxmax()].test_time
df_f[LABEL.DT.value] = df_f[LABEL.TEST_TIME.value].diff() / 3600.0
df_f_c = df_f[df_f[LABEL.I.value] > 0]
df_f_d = df_f[df_f[LABEL.I.value] < 0]
df_f = calc_cycle_quantities(df_f)
df_t.loc[df_t.cycle_index == x,
LABEL.CYCLE_TIME.value] = df_f[LABEL.CYCLE_TIME.value]
df_t.loc[df_t.cycle_index == x,
LABEL.AH_C.value] = df_f[LABEL.AH_C.value]
df_t.loc[df_t.cycle_index == x,
LABEL.E_C.value] = df_f[LABEL.E_C.value]
df_t.loc[df_t.cycle_index == x,
LABEL.AH_D.value] = df_f[LABEL.AH_D.value]
df_t.loc[df_t.cycle_index == x,
LABEL.E_D.value] = df_f[LABEL.E_D.value]
df_c.iloc[c_ind,
df_c.columns.get_loc(LABEL.AH_C.value)] = df_f[
LABEL.AH_C.value].max()
df_c.iloc[c_ind,
df_c.columns.get_loc(LABEL.AH_D.value)] = df_f[
LABEL.AH_D.value].max()
df_c.iloc[c_ind, df_c.columns.get_loc(LABEL.E_C.value)] = df_f[
LABEL.E_C.value].max()
df_c.iloc[c_ind, df_c.columns.get_loc(LABEL.E_D.value)] = df_f[
LABEL.E_D.value].max()
df_c.iloc[c_ind,
df_c.columns.get_loc(LABEL.V_C_MEAN.value)] = df_f_c[
LABEL.V.value].mean()
df_c.iloc[c_ind,
df_c.columns.get_loc(LABEL.V_D_MEAN.value)] = df_f_d[
LABEL.V.value].mean()
if df_c.iloc[c_ind,
df_c.columns.get_loc(LABEL.AH_C.value)] == 0:
df_c.iloc[c_ind,
df_c.columns.get_loc(LABEL.AH_EFF.value)] = 0
else:
df_c.iloc[c_ind, df_c.columns.get_loc(LABEL.AH_EFF.value)] = df_c.iloc[c_ind, df_c.columns.get_loc(LABEL.AH_D.value)] / \
df_c.iloc[c_ind, df_c.columns.get_loc(LABEL.AH_C.value)]
if df_c.iloc[c_ind,
df_c.columns.get_loc(LABEL.E_C.value)] == 0:
df_c.iloc[c_ind,
df_c.columns.get_loc(LABEL.E_EFF.value)] = 0
else:
df_c.iloc[c_ind, df_c.columns.get_loc(LABEL.E_EFF.value)] = df_c.iloc[c_ind, df_c.columns.get_loc(LABEL.E_D.value)] / \
df_c.iloc[c_ind, df_c.columns.get_loc(LABEL.E_C.value)]
except Exception as e:
pass
df_cc = df_c[df_c[LABEL.CYCLE_INDEX.value] > 0]
df_tt = df_t[df_t[LABEL.CYCLE_INDEX.value] > 0]
return df_cc, df_tt
# unpack the dataframe and calculate quantities used in statistics
def calc_cycle_quantities(df):
tmp_arr = df[[
LABEL.TEST_TIME.value, LABEL.I.value, LABEL.V.value, LABEL.AH_C.value,
LABEL.E_C.value, LABEL.AH_D.value, LABEL.E_D.value,
LABEL.CYCLE_TIME.value
]].to_numpy()
start = 0
last_time = 0
last_i_c = 0
last_v_c = 0
last_i_d = 0
last_v_d = 0
last_ah_c = 0
last_e_c = 0
last_ah_d = 0
last_e_d = 0
initial_time = 0
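    # Accumulate per-cycle charge (ah_c / ah_d) and energy (e_c / e_d) by trapezoidal integration of current and current*voltage over time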
for x in tmp_arr:
if start == 0:
start += 1
initial_time = x[0]
else:
if x[1] >= 0:
x[3] = (x[0] - last_time) * (x[1] + last_i_c) * 0.5 + last_ah_c
x[4] = (x[0] - last_time) * (x[1] + last_i_c) * 0.5 * (
x[2] + last_v_c) * 0.5 + last_e_c
last_i_c = x[1]
last_v_c = x[2]
last_ah_c = x[3]
last_e_c = x[4]
if x[1] <= 0:
x[5] = (x[0] - last_time) * (x[1] + last_i_d) * 0.5 + last_ah_d
# if x[5] == 0:
# print("x5=0:" + str(x[5]) + " last_ah_d: " +
# str(last_ah_d))
# if last_ah_d == 0:
# print("x5:" + str(x[5]) + " last_ah_d=0: " +
# str(last_ah_d))
x[6] = (x[0] - last_time) * (x[1] + last_i_d) * 0.5 * (
x[2] + last_v_d) * 0.5 + last_e_d
last_i_d = x[1]
last_v_d = x[2]
last_ah_d = x[5]
last_e_d = x[6]
x[7] = x[0] - initial_time
last_time = x[0]
df_tmp = pd.DataFrame(data=tmp_arr[:, [3]], columns=[LABEL.AH_C.value])
df_tmp.index += df.index[0]
df[LABEL.AH_C.value] = df_tmp[LABEL.AH_C.value] / 3600.0
df_tmp = | pd.DataFrame(data=tmp_arr[:, [4]], columns=[LABEL.E_C.value]) | pandas.DataFrame |
#!/usr/bin/env python
debug = True
if debug: print('debug = True')
computaGenero = True
if debug: print('computaGenero = ' + str(computaGenero))
computaCursos = True
if debug: print('computaCursos = ' + str(computaCursos))
computaRecorrencia = True # Also applies to retention and attrition
if debug: print('computaRecorrencia = ' + str(computaRecorrencia))
entrada = True
if debug: print('entrada = ' + str(entrada))
incremental = False # Only has an effect when entrada is True. Keeps whatever is already in the output
if debug: print('incremental = ' + str(incremental))
graficos = True
if debug: print('graficos = ' + str(graficos))
if debug: print('')
import json
import pandas as pd
import os
from pathlib import Path
import re
import errno
from enum import Enum
from collections import namedtuple
import numpy as np
if computaGenero:
from genderize import Genderize
genderize = Genderize()
if graficos:
import matplotlib.pyplot as plt
#plt.close('all')
if computaRecorrencia:
from unidecode import unidecode
from operator import itemgetter
from similarity.jarowinkler import JaroWinkler
jw = JaroWinkler()
if graficos:
import itertools
import calendar
# Columns that are not part of the input must have a blank Expressão value
# Columns with an Expressão value have their names replaced by the Descrição
Coluna = namedtuple('Coluna', ['Descrição', 'Expressão'])
class Colunas(Enum):
@property
def Descrição(self):
        '''Column name.'''
return self.value[0].Descrição
@property
def Expressão(self):
        '''Column regex.'''
return self.value[0].Expressão
    # Be careful not to forget the comma at the end of each line
Nome = Coluna('Nome', r'NOME'),
RG = Coluna('RG', r'Documento de Identidade|^R\.?G\.?$'),
CPF = Coluna('CPF', r'CPF'),
Curso = Coluna('Curso', r'CURSO'),
ID = Coluna('ID', None),
Ação = Coluna('Ação', None),
Evasão = Coluna('Evasão', None),
Evasora = Coluna('Evasora', None),
Gênero = Coluna('Gênero', None),
Porcentagem = Coluna('Porcentagem', None),
Retenção = Coluna('Retenção', None),
Retentora = Coluna('Retentora', None),
Quantidade = Coluna('Quantidade', None),
Válidos = Coluna('Qtde. voluntários válidos', None),
try:
with open("../Saida/generos.json") as json_file:
generos = json.load(json_file)
if debug: print('Lendo Saida/generos.json')
except FileNotFoundError:
if debug: print('Saida/generos.json não encontrado')
# Names can be added here (or in the Saida/generos.json file) in case they are not found by Genderize
generos = {
'ALDREI': 'm',
'EDIPO': 'm',
'FABRICIO': 'm',
'HYTALO': 'm',
'JOLINDO': 'm',
'KAWE': 'm',
'MASSARU': 'm',
'OTAVIO': 'm',
'VINICIUS': 'm',
'CARINE': 'f',
'CASSIA': 'f',
'FLAVIA': 'f',
'FRANCYELE': 'f',
'GABRIELLA': 'f',
'HELOISA': 'f',
'IHANNA': 'f',
'JENYFFER': 'f',
'JESSICA': 'f',
'JULIA': 'f',
'LAIS': 'f',
'LETICIA': 'f',
'LIGIA': 'f',
'MAITHE': 'f',
'MARIANGELA': 'f',
'MARINEIA': 'f',
'MONICA': 'f',
'NAIADY': 'f',
'NATHALIA': 'f',
'NATHALLI': 'f',
'STHEFANIE': 'f',
'TAIZA': 'f',
'TAMILES': 'f',
'TAIS': 'f',
'TASSIANY': 'f',
'TATIANY': 'f',
'THASSIA': 'f',
'VERONICA': 'f',
}
# Expressions can be added here to ignore volunteer names
# Ignoring means skipping the gender, recurrence, retention and attrition analyses
nomesExcluidos = [re.compile(expressao, re.I) for expressao in [
r'confirmou',
]]
# The order in which the courses appear in the dictionary matters, since the search respects that order
# Example: "educação física" must appear before "física"
cursos = dict((curso, re.compile(expressao, re.I)) for curso, expressao in {
'Engenharia elétrica': r'el[eé]trica',
'Psicologia': r'psico',
'Comunicação social: jornalismo': r'jornal',
'Medicina': r'medicina|fmrp',
'Mestrando': r'mestrado|mestrando',
'Ciência da computação': r'ci[êe]ncias?\s+da\s+computa[cç][aã]o|bcc',
'Engenharia mecânica': r'mec[aâ]nica',
'Engenharia de produção': r'produ[cç][aã]o',
'Engenharia civil': r'civil',
'Economia Empresarial e Controladoria': r'ecec',
'Não universitário': r't[eé]cnic[oa]|n[aã]o\s+cursante|completo|etec|trabalho|profissional|convidad[ao]|extern[ao]|palestra|volunt[aá]ri[ao]|nenhum|socorrista|cursinho|vestibula|nutricionista|enfermeira|formad[oa]|consultora|decoradora|estudante|fiscal|terapeuta|banc[aá]ria|psic[oó]log[ao]|assessora|empres[áa]ri[ao]|noite|professor|desempregad[ao]|mãe|graduad[ao]',
'Meteorologia': r'meteoro',
'Educação física': r'(educa[çc][ãa]o|ed\.?)\s+f[íi]sica',
'Física': r'f[ií]sica',
'Doutorando': r'doutorado',
'Ciências biológicas': r'biologia|biol[oó]gicas|^bio',
'Química': r'qu[íi]mica',
'Administração': r'adm',
'Música': r'^m[úu]sica',
'Matemática aplicada a negócios': r'^man|neg[óo]cio',
'Engenharia química': r'engenharia\s+qu[íi]mica|eng\s+qu[íi]mica',
'Fisioterapia': r'fisio',
'Ciências contábeis': r'cont',
'Economia': r'econo',
'Pedagogia': r'^pedago',
'Biblioteconomia e Ciência da Informação': r'^BCI',
'Universitário: curso não informado': r'^unaerp|cultura|ufpa|ffclrp|^unesp|^integral\s+manh[aã]|^fea',
'Pós graduando': r'p[óo]s\s+gradua[çc][ãa]o',
'Agronomia': r'agro',
'Análise e desenvolvimento de sistemas': r'an[áa]lise',
'Arquitetura': r'arq',
'Artes visuais': r'artes',
'Biotecnologia': r'^biotecnologia',
'Ciências biomédicas': r'ci[eê]ncias\s+biom[eé]dicas',
'Comunicação social: radialismo': r'rtv|radialismo|r[aá]dio\s+e\s+tv',
'Dança, grafiti e teatro': r'teatro',
'Design': r'design',
'Direito': r'^direito',
'Ecologia': r'^ecologia',
'Enfermagem': r'enfermagem|eerp',
'Engenharia ambiental': r'amb',
'Engenharia de biossistemas': r'biossistemas',
'Engenharia da computação': r'engenharia\s+d[ae]\s+computa[cç][aã]o',
'Engenharia florestal': r'florestal',
'Farmácia': r'^farm[áa]cia|fcfrp',
'Filosofia': r'^filo',
'Fonoaudiologia': r'^fono',
'Genética': r'gen[ée]tica',
'Informática biomédica': r'inform[áa]tica\s+biom[eé]dica|^ibm',
'Letras': r'^letras',
'Marketing': r'marketing|mkt',
'Nutrição e metabolismo': r'nutri[çc][ãa]o',
'Medicina veterinária': r'veterin[áa]ria',
'Teologia': r'^teologia',
'Terapia ocupacional': r'ocupacional|t.o',
'Odontologia': r'^odonto|forp',
'Publicidade e propaganda': r'publicidade|pp',
'Recursos humanos': r'recursos\s+humanos|rh',
'Relações públicas': r'rela[cç][oõ]es\s+p[uú]blicas|rp',
'Serviço social': r'social',
'Sistemas de informação': r'sistemas|^b?si$',
}.items())
listaCursos = [curso for curso, _ in cursos.items()]
loteGeneros = {}
dfs = {}
desc = pd.DataFrame()
pessoas = pd.DataFrame(columns = [Colunas.ID.Descrição, Colunas.Nome.Descrição, Colunas.RG.Descrição, Colunas.CPF.Descrição, Colunas.Quantidade.Descrição, Colunas.Retentora.Descrição, Colunas.Evasora.Descrição, Colunas.Curso.Descrição, Colunas.Gênero.Descrição])
lastID = 0
def createDir(path):
    '''Creates the directory of the given path if it does not exist.'''
directory = os.path.dirname(path)
if not os.path.exists(directory):
if debug: print('Criando diretório ' + directory)
try:
os.makedirs(directory, exist_ok = True)
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
def output(dataframe, path):
    '''Saves the dataframe as csv at the given path.'''
filename = "../Saida/" + path + "/output.csv"
createDir(filename)
if debug: print('Escrevendo ' + path + '/output.csv')
dataframe.to_csv(filename, index = False, float_format = '%.f')
def incluiAcao(path):
    '''
    Includes the action from a csv file.
    No analysis is performed and no file is generated.
    The global variables `dfs`, `pessoas` and `loteGeneros` are populated.
    '''
ids = []
if debug: print('Lendo ' + path + ".csv")
df = pd.read_csv("../Dados/" + path + ".csv", true_values = ['Sim'], false_values = ['Não'])
if debug: print('Removendo colunas desnecessárias')
df = df.loc[:, df.columns.str.contains('|'.join([coluna.Expressão for coluna in Colunas if coluna.Expressão]), case = False)]
    # Rename columns so they become homogeneous, following the `Descrição` property
if debug: print('Renomeando colunas')
def columnIterator():
        '''Returns only the columns that match one of the expressions in `Colunas`.'''
for coluna in Colunas:
if coluna.Expressão:
for col in df.columns:
if re.search(coluna.Expressão, col, re.I):
yield (col, coluna.Descrição)
break
df.rename(columns = dict(columnIterator()), inplace = True)
if debug: print('Limpando valores')
    df.replace(r'\t', ' ', regex = True, inplace = True) # Replace tabs with spaces
    df.replace(r'\s{2,}', ' ', regex = True, inplace = True) # Remove consecutive spaces
df.replace(r'^\s+|\s+$', '', regex = True, inplace = True) # Leading and trailing trimming
    df.replace(r'^$', None, regex = True, inplace = True) # Turn empty strings into None
if debug: print('Removendo linhas totalmente em branco')
df.dropna(axis = 'index', how = 'all', inplace = True)
    # After removing unwanted rows and columns, rebuild the indices
if debug: print('Refazendo índices')
df.reset_index(drop = True, inplace = True)
if debug: print('')
for i in df.index:
temNome = False
if Colunas.Nome.Descrição in df:
value = df.at[i, Colunas.Nome.Descrição]
if pd.isnull(value):
if debug: print('Sem nome')
elif any([reg.search(value) for reg in nomesExcluidos]):
df.at[i, Colunas.Nome.Descrição] = None
if debug: print('Sem nome')
else:
                # Remove 'pipes' from the name, since the pipe is used as a separator in the recurrence analysis
value = re.sub(r'\|', '', value)
if value == '':
df.at[i, Colunas.Nome.Descrição] = None
if debug: print('Sem nome')
else:
temNome = True
nome = df.at[i, Colunas.Nome.Descrição] = value
if debug: print(value)
elif debug: print('Sem nome')
def validaDocumento(coluna):
            '''Validates CPF or RG, keeping only digits (numbers in any position and `x` or `X` in the last position).'''
if coluna in df:
value = df.at[i, coluna]
if pd.notnull(value):
try:
                        int(value) # If it is already an int, then there are no special characters...
                        if debug: print(coluna + ': ' + str(value))
                    except ValueError:
                        newValue = re.sub(r'[^0-9xX]|[xX].', '', value) # Remove special characters from the document (keep only the digits)
df.at[i, coluna] = None if newValue == '' else newValue
if debug: print(coluna + ': ' + value + ' -> ' + newValue)
validaDocumento(Colunas.RG.Descrição)
validaDocumento(Colunas.CPF.Descrição)
        # Recurrence analysis
def analiseRecorrencia(*args):
            '''Looks for recurrence by matching on the indicated columns.'''
def analiseCurso():
                '''Immediately assigns the course according to the expressions defined at the top of the file. Raises an exception when it is not found.'''
nome = df.at[i, Colunas.Curso.Descrição]
if pd.isnull(nome):
if debug: print('Curso não preenchido')
else:
try:
curso = next(curso for curso, reg in cursos.items() if reg.search(nome))
if debug: print('Curso: ' + nome + ' -> ' + curso)
return curso
except StopIteration:
raise Exception('Curso desconhecido: ' + nome)
def analiseGenero(ID):
                '''
                If the gender is in the local dictionary, the assignment is immediate.
                Otherwise, the name is added to the batch looked up after all inclusions finish, in the `computaGeneros` function.
                '''
primeiroNome = nome.split()[0]
nomeSemAcento = unidecode(primeiroNome.upper())
genero = generos.get(nomeSemAcento)
if genero:
pessoas.loc[pessoas[Colunas.ID.Descrição] == ID, Colunas.Gênero.Descrição] = genero
if debug: print(primeiroNome + ' -> ' + genero)
else:
                    # Add the name to the batch to be looked up in `computaGeneros`
if nomeSemAcento in loteGeneros:
if not ID in loteGeneros[nomeSemAcento]:
loteGeneros[nomeSemAcento].append(ID)
else:
loteGeneros[nomeSemAcento] = [ID]
def buscaColuna(coluna):
                '''Looks for recurrence by matching on the indicated column.'''
if coluna in df:
key = df.at[i, coluna]
if pd.notnull(key):
if computaRecorrencia:
nonlocal nome
                            upperName = unidecode(nome.upper()) # Similarity ignores accents (a = á) and is case insensitive (a = A)
def analiseNomes(pessoa):
                                '''Returns the similarity degree (0-1) and the name of the best match (highest similarity) found.'''
nomes = pessoa.split('|')
values = [jw.similarity(unidecode(nome.upper()), upperName) for nome in nomes]
index, value = max(enumerate(values), key = itemgetter(1))
return value, nomes[index]
if coluna == Colunas.Nome.Descrição:
similaridades = pessoas[coluna].map(analiseNomes)
                                matches = pd.Series([similaridade[0] for similaridade in similaridades]) > .96 # Minimum acceptable similarity: 96%
else:
matches = pessoas[coluna] == key
if matches.sum() > 1:
                                # If this happens with a name, it may be worth raising the minimum acceptable similarity
                                # If it happens with a document, it is probably a bug
raise Exception('Mais de um registro de ' + coluna + ' "' + key + '" encontrado')
if matches.any():
ID = pessoas.loc[matches, Colunas.ID.Descrição].iloc[0]
if coluna == Colunas.Nome.Descrição:
similaridade = max(similaridades, key = itemgetter(0))
elif computaRecorrencia:
similaridade = analiseNomes(pessoas.loc[matches, Colunas.Nome.Descrição].iloc[0])
else:
similaridade = [1]
if similaridade[0] < 1:
                                    # If the same person shows up with different names, all of them are kept
pessoas.loc[matches & (pessoas[Colunas.Nome.Descrição] == ''), Colunas.Nome.Descrição] = nome
pessoas.loc[matches & (pessoas[Colunas.Nome.Descrição] != ''), Colunas.Nome.Descrição] += '|' + nome
                                # If a column diverges among records of the same person, the first value found wins and the rest is ignored
                                # Course
if computaCursos and Colunas.Curso.Descrição in df and pd.isnull(pessoas.loc[matches, Colunas.Curso.Descrição].iloc[0]):
curso = analiseCurso()
if curso:
pessoas.loc[matches, Colunas.Curso.Descrição] = curso
# Gênero
if computaGenero and Colunas.Nome.Descrição in df and pd.isnull(pessoas.loc[matches, Colunas.Gênero.Descrição].iloc[0]):
analiseGenero(ID)
if debug:
print('Recorrência encontrada pelo ' + coluna + f' ({key})')
if coluna == Colunas.Nome.Descrição:
print(f'Similaridade: {similaridade[0] * 100:.0f}% (' + similaridade[1] + ')')
print(f'ID: {ID:.0f}')
pessoas.loc[matches, Colunas.Quantidade.Descrição] += 1
pessoas.loc[matches, Colunas.Evasora.Descrição] = path
return ID
for arg in args:
if arg is not None:
ID = buscaColuna(arg)
if ID: return ID
global lastID
lastID += 1
if debug: print(f'Recorrência não encontrada. ID atribuído: {lastID:.0f}')
pessoas.loc[pessoas.shape[0]] = {
Colunas.ID.Descrição: lastID,
Colunas.RG.Descrição: df.at[i, Colunas.RG.Descrição] if Colunas.RG.Descrição in df else None,
Colunas.CPF.Descrição: df.at[i, Colunas.CPF.Descrição] if Colunas.CPF.Descrição in df else None,
Colunas.Nome.Descrição: df.at[i, Colunas.Nome.Descrição] if temNome else '',
Colunas.Quantidade.Descrição: 1,
Colunas.Retentora.Descrição: path,
Colunas.Evasora.Descrição: path,
Colunas.Curso.Descrição: analiseCurso() if computaCursos and Colunas.Curso.Descrição in df and pd.notnull(df.at[i, Colunas.Curso.Descrição]) else None,
Colunas.Gênero.Descrição: None,
}
if computaGenero and Colunas.Nome.Descrição in df and pd.notnull(df.at[i, Colunas.Nome.Descrição]):
analiseGenero(lastID)
return lastID
ID = analiseRecorrencia(Colunas.RG.Descrição, Colunas.CPF.Descrição, Colunas.Nome.Descrição if temNome and computaRecorrencia else None)
df.at[i, Colunas.ID.Descrição] = ID
ids.append(ID)
df.at[i, Colunas.Curso.Descrição] = None
if debug: print('')
if Colunas.Curso.Descrição in df:
df[Colunas.Curso.Descrição] = df[Colunas.Curso.Descrição].apply(lambda value: str(value) if pd.notnull(value) else None)
dfs[path] = df
if debug: print('')
return ids
def chunks(l, n):
for i in range(0, len(l), n):
yield l[i:i + n]
def computaGeneros():
    '''
    Looks up genders in batches of 10 names from the `loteGeneros` variable, storing them in the gender dictionary and assigning the gender to each person.
    The most recurrent names are looked up first. If a person with multiple names already has the gender found, the other names are ignored.
    If a name is ignored by all the people it refers to, it stops being looked up.
    '''
def iteradorLote():
for nome, IDs in loteGeneros.items():
matches = pessoas[Colunas.ID.Descrição].isin(IDs)
if any(pd.isnull(pessoas.loc[matches, Colunas.Gênero.Descrição])):
yield nome, matches, IDs.__len__()
for lote in chunks(sorted(iteradorLote(), key = lambda item: item[2], reverse = True), 10):
retorno = genderize.get([nome for nome, _, _ in lote])
#country_id = 'br', language_id = 'pt'
for genero, (nome, matches, _) in zip([genero[0] if genero else 'n' for genero in [genero['gender'] for genero in retorno]], lote):
pessoas.loc[matches, Colunas.Gênero.Descrição] = generos[nome] = genero
if debug: print(nome + ' -> ' + genero)
arquivo = "../Saida/generos.json"
createDir(arquivo)
with open(arquivo, 'w') as outfile:
json.dump(generos, outfile, sort_keys = True, indent = 2)
if debug:
print('Salvando Saida/generos.json')
print('')
def analisaAcao(path, ids):
    '''
    Performs the gender and course analysis of the action, using the `DataFrame` stored in `dfs` for the given path.
    Optionally, `ids` restricts the analysis to certain people only (useful for incremental input).
    '''
if debug: print('Analisando ' + path)
df = dfs[path]
algum = not incremental or not os.path.exists("../Saida/" + path + "/output.csv")
if computaCursos or computaGenero:
for i in df.index:
pessoa = pessoas[pessoas[Colunas.ID.Descrição] == df.at[i, Colunas.ID.Descrição]]
ID = pessoa[Colunas.ID.Descrição].item()
if ids and not ID in ids: continue
if debug: print(ID)
# Curso
if computaCursos and (not Colunas.Curso.Descrição in df or pd.isnull(df.at[i, Colunas.Curso.Descrição])):
value = pessoa[Colunas.Curso.Descrição].item()
if pd.isnull(value):
if debug: print('Curso não preenchido')
else:
try:
df.at[i, Colunas.Curso.Descrição] = value
if debug: print('Curso: ' + value)
algum = True
except StopIteration:
raise Exception('Curso desconhecido: ' + value)
# Gênero
if computaGenero and (not Colunas.Gênero.Descrição in df or pd.isnull(df.at[i, Colunas.Gênero.Descrição])):
value = pessoa[Colunas.Gênero.Descrição].item()
genero = 'n' if pd.isnull(value) else value
df.at[i, Colunas.Gênero.Descrição] = genero
if debug: print('Gênero: ' + genero)
algum = True
if debug: print('')
if algum:
output(df, path)
def get_last(a):
    '''Returns the index of the last occurrence of an element other than `None` in the collection `a`.'''
return get_n_last(a, 1)
def get_n_last(a, n):
    '''Returns the index of the n-th last occurrence of an element other than `None` in the collection `a`.'''
for i, e in enumerate(reversed(a)):
if e is not None:
n = n - 1
if n == 0:
return len(a) - i - 1
return -1
def ranking(nome, colunas, legendas):
posicaoMaxima = 2
    # 0 = Year
    # 1 = Month
    # 2 = City
g = desc[:-1].groupby(['0', '1', '2']).agg( { coluna: np.sum for coluna in colunas })
cidades = g.index.get_level_values('2').unique()
for cidade in cidades:
if debug: print('Gerando gráfico de ranking por ' + nome + ' para ' + cidade)
dfAux = pd.DataFrame()
descCidade = desc[desc['2'] == cidade].reset_index(drop = True)
r = pd.date_range(pd.to_datetime(calendar.month_abbr[int(descCidade.at[0, '1'])] + '-' + descCidade.at[0, '0']), pd.to_datetime(calendar.month_abbr[int(descCidade.iloc[-1]['1'])] + '-' + descCidade.iloc[-1]['0']), freq = 'MS')
for date in r:
filtro = g[(g.index.get_level_values('0') == str(date.year)) & (g.index.get_level_values('1') == f'{date.month:02d}') & (g.index.get_level_values('2') == cidade)]
dfAux = dfAux.append(filtro.apply(lambda x: x.reset_index(drop = True))
.T
.nlargest(posicaoMaxima, columns = 0)
.rank(method = 'first', ascending = False)
.T
if not filtro.empty else | pd.Series() | pandas.Series |
from bs4 import BeautifulSoup
import requests
import pandas as pd
## don't truncate printed urls
pd.set_option('display.max_colwidth', None)
## spoof user-agent
headers = { 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2490.80 Safari/537.36',
'Content-Type': 'text/html'}
## category urls
urls = ['http://www.irishnews.com/dyna/partial/articlesbycategory?s=0&e=2501&category=%2firishnews%2fnews%2fpoliticalnews&onlyPrimary=false',
'http://www.irishnews.com/dyna/partial/articlesbycategory?s=0&e=2501&category=%2firishnews%2fnews%2frepublicofirelandnews&onlyPrimary=false',
'http://www.irishnews.com/dyna/partial/articlesbycategory?s=0&e=2501&category=%2firishnews%2fnews%2fassemblyelection&onlyPrimary=false',
'http://www.irishnews.com/dyna/partial/articlesbycategory?s=0&e=2501&category=%2firishnews%2fnews%2fbrexit&onlyPrimary=false',
'http://www.irishnews.com/dyna/partial/articlesbycategory?s=0&e=2501&category=%2firishnews%2fnews%2fcivilrights&onlyPrimary=false',
'http://www.irishnews.com/dyna/partial/articlesbycategory?s=0&e=2501&category=%2firishnews%2fnews%2fgeneralelection&onlyPrimary=false',
'http://www.irishnews.com/dyna/partial/articlesbycategory?s=0&e=2501&category=%2firishnews%2fnews%2fnorthernirelandnews&onlyPrimary=false',
'http://www.irishnews.com/dyna/partial/articlesbycategory?s=0&e=2501&category=%2firishnews%2fnews%2fuknews&onlyPrimary=false']
## takes <article></article> element and converts to df row
def elementToRow(soup):
titleBlock = soup.find(class_="lancio-title")
title = titleBlock.find("a").get_text()
url = titleBlock.find("a")["href"].lstrip("/")
## warning, stub is often similar to title
stub = soup.find(class_="lancio-text").get_text()
if(soup.find(class_="lancio-tag")):
author = soup.find(class_="lancio-tag").get_text()
else:
author = ""
rawDate = soup.find(class_="lancio-datetime-string")['datetime'].split(" ")[0]
d = {"title": [title], "url": [url], "stub":[stub], "author":[author], "date":[rawDate]}
df = pd.DataFrame(data=d)
df['date'] = pd.to_datetime(df['date'])
return df
## iterate over category urls, populate df
def scrape():
res = pd.DataFrame()
for u in urls:
r = requests.get(u, headers=headers)
soup = BeautifulSoup(r.text, 'html.parser');
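        # Each article teaser on the category page sits in an element with class "row lancio"; convert each one to a single-row DataFrame and append it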
for ele in soup.find_all(class_="row lancio"):
tmp = [res, elementToRow(ele)]
res = | pd.concat(tmp) | pandas.concat |
#!/usr/bin/env python3
import pandas as pd
import re, ntpath, time, shlex, datetime, warnings, os
import numpy as np
from ast import literal_eval
warnings.filterwarnings("ignore")
try:
import easygui
except ModuleNotFoundError:
print("A library is missing, please wait while it gets installed.\n")
os.system("pip install easygui")
print("Library installed sucessfully.\n")
import easygui
timestr = time.strftime("%Y%m%d-%H%M%S")
#Reading contents from the file
path = easygui.fileopenbox(msg=None, title="Select file", default='*', filetypes=None, multiple=False)
outname = ntpath.basename(path).replace(".txt", "")
def filecontent(path):
#checking for possible unnecessary 8 digit numbers in each line
with open(path, 'r') as file:
content = file.readlines()
for line in content:
line = line.rstrip()
a = re.findall(r"\D(0000\d{4})\D", " "+line+" ")
if a:
a = "".join(a)
line = line.replace(a, "").rstrip()
with open("temp.txt", 'a') as f:
f.write(line + "\n")
#splitting the txt based on a delimiter
with open("temp.txt", 'r') as file:
content = file.read().rstrip()
lines = content.split('.')
lines = [x for x in lines if x]
os.remove("temp.txt")
return lines
lines = filecontent(path)
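# `lines` now holds one table definition block per element, split on the '.' delimiter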
#Removing quotes from the csv file
def file_modify(filename, a=None):
with open(filename, 'r+') as file:
content = file.read()
for ch in ['"','[',']']:
if ch in content:
content = content.replace(ch,'')
if "\n\n" in content:
content = content.replace("\n\n", "\n")
if a:
content = content.replace(",", "")
with open(filename, 'w') as file:
file.write(content)
#Checking for duplicate rows in code table
def check_dup(f):
duplicateRowsDF = f[f.duplicated(['Table Name', '1st'])]
print("We have " + str(len(duplicateRowsDF)) + " duplicate rows in code generation table. \n")
print(duplicateRowsDF)
#Initializing variables for parsing, used later to convert to a dataframe
table_name, version_name, type_name, issorted, isduplicate, table_data, value_name, time_t, search_type = ([] for i in range(9))
counter = 0
print("\n--------------------------------WARNINGS/ERRORS------------------------------------\n\n")
for line in lines:
if '*+' in line: #removing comment symbols
line.replace('*+', " ")
#for table name field
try:
tname = re.search('TABLE NAME IS (.*) VERSION', line)
table_name.append(tname.group(1))
except AttributeError:
print("There is no such attribute. Check the file for correct table name at index " + str(counter))
table_name.append("NaN")
#for version field
try:
vname = re.search('VERSION IS (.*)', line)
version_name.append("Version " + vname.group(1))
except AttributeError:
print("There is no such attribute. Check the file for correct version at index " + str(counter))
version_name.append("NaN")
#for type field
try:
tyname = re.search('TYPE IS (.*)', line)
if tyname.group(1) in ['EDIT VALID', 'EDIT INVALID', 'CODE']:
type_name.append(tyname.group(1))
else:
print("There is no such attribute. Check the file for correct type at index " + str(counter))
type_name.append("NaN")
if tyname.group(1) in ['EDIT VALID', 'EDIT INVALID']:
            #for alphanumeric field in case of EDIT VALID/INVALID
tbldata = re.search('TABLE DATA IS (.*)', line)
if tbldata == None:
table_data.append("")
else:
table_data.append(tbldata.group(1))
elif tyname.group(1) == 'CODE':
encodedata = re.search('ENCODE DATA IS (.*)', line)
decodedata = re.search('DECODE DATA IS (.*)', line)
if encodedata and decodedata == None:
table_data.append("")
else:
tbldata = ",".join((encodedata.group(1),decodedata.group(1)))
table_data.append(tbldata)
else:
print("There is no such attribute. Check the file for correct datatype at index " + str(counter))
table_data.append("NaN")
except AttributeError:
print("There is no such attribute. Check the file for correct type & datatype at index " + str(counter))
type_name.append("NaN")
table_data.append("NaN")
#for sorted/unsorted field
try:
sname = re.search('TABLE IS (.*)', line)
if sname.group(1) in ['SORTED', 'UNSORTED']:
issorted.append(sname.group(1))
else:
print("There is no such attribute. Check the file for correct table sorted/unsorted value at index " + str(counter))
issorted.append("NaN")
except AttributeError:
print("There is no such attribute. Check the file for correct table sorted/unsorted value at index " + str(counter))
issorted.append("NaN")
#for whether duplicate values are allowed or not
try:
dname = re.search('DUPLICATES ARE (.*)', line)
if dname == None:
isduplicate.append("")
elif dname.group(1) == 'NOT ALLOWED':
isduplicate.append("NODUP")
elif dname.group(1) == 'ALLOWED':
isduplicate.append("DUP")
except AttributeError:
print("There is no such attribute. Check the file for duplicate field at index " + str(counter))
#for values field
try:
vname = re.search('VALUES ARE \( (.*) \)', line, flags=re.DOTALL)
temp1 = vname.group(1).replace("' '", "''")
temp2 = ' '.join(map(str.strip, temp1.split("\n")))
value_name.append(temp2)
except AttributeError:
print("There is no such attribute. Check the file for Value field at index " + str(counter))
#for searchtype field
try:
temp = re.search('SEARCH IS (.*)', line)
if "BINARY" in temp.group(1):
search_type.append("BIN")
elif "LINEAR" in temp.group(1):
search_type.append("LIN")
except AttributeError:
print("There is no such attribute. Check the file for Value field at index " + str(counter))
search_type.append("NaN")
#for timestamp
try:
time_t.append(" * Table generated at " + str(datetime.datetime.now()) + "\n\t ")
except AttributeError:
print("There is no such attribute. Check the code for correct code syntax or OS for time.")
counter += 1
#Creating Dataframe
summary = {
"Table Name": table_name,
"Version": version_name,
"Type": type_name,
"Is_sorted": issorted,
"Duplicate": isduplicate,
"Datatype": table_data,
"Searchtype": search_type,
"Timestamp": time_t,
"Value": value_name
}
df = pd.DataFrame(summary)
df['Datatype'] = df['Datatype'].str.replace('ALPHANUMERIC','X')
df['Datatype'] = df['Datatype'].str.replace('NUMERIC','9')
df["Is_sorted"].replace({"SORTED": "Y", "UNSORTED": "N"}, inplace=True)
df_summary = df.iloc[:,:-2]
print("\n--------------------------------SUMMARY FILE-----------------------------------\n\n")
print(df_summary)
print("\n\n")
#Export to csv file
csv_filename = outname + "_summary" + ".csv"
df_summary.to_csv(csv_filename, header=False, index=False)
csvmodify = file_modify(csv_filename)
####################################################################################
#Function for Code Generation Table
def code_table():
df_type_code_orig = df[df["Type"]=="CODE"]
#df_type_code
columns = ['Table Name', 'Value']
df_type_code = pd.DataFrame(df_type_code_orig, columns=columns)
for index, row in df_type_code.iterrows():
span = 2
words = shlex.split(row.Value, posix = False)
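        # Values of CODE tables come in pairs; group every two tokens into one comma-separated "1st,2nd" entry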
row.Value = [",".join(words[i:i+span]) for i in range(0, len(words), span)]
df_type_code['Index'] = df_type_code.index
df_temp = (pd.DataFrame({'Table Name': list(df_type_code['Table Name']),
'Value': list(df_type_code['Value']),
'Index': list(df_type_code['Index'])})
.set_index(['Index', 'Table Name']))
temp = df_temp.explode('Value')
temp.reset_index(inplace=True, level=1)
df_new = temp[['Table Name', 'Value']]
df_final = pd.concat([df_new, df_new['Value'].str.split(',', expand=True)], axis=1)
df_final = df_final.drop(df_final.columns[[1]], axis=1)
df_final.columns = ['Table Name', '1st', '2nd']
df_final.reset_index(inplace=True)
df_final['type'] = 'C'
df_final['blank'] = ''
df_final['zero'] = '0'
columns = ['Table Name', 'type', '1st', 'zero', 'Table Name', 'type', '2nd', 'blank']
df_final = df_final[columns]
temp1 = df_type_code_orig["Searchtype"] + "," + df_type_code_orig['Is_sorted'] + "," + df_type_code_orig['Datatype'] + ","
temp1 = pd.DataFrame(temp1, columns=['one'])
temp2 = df_type_code['Value'].to_frame()
temp = pd.concat([temp1, temp2], axis=1, ignore_index=True)
temp.columns = ['one', 'two']
df2 = temp.explode('two')
df_final2 = pd.concat([df2, df2['two'].str.split(',', expand=True)], axis=1)
df_final2 = df_final2.drop(df_final2.columns[[1]], axis=1)
df_final2.columns = ['Name', '1st', '2nd']
df_final2.reset_index(inplace=True)
df_final2 = df_final2[['Name']]
result = pd.concat([df_final, df_final2], axis=1)
result_f1 = result.iloc[:, :3]
result_f11 = result_f1[['Table Name', 'type', '1st']].apply(lambda x: ''.join(x), axis=1).to_frame()
result_f2 = result.iloc[:,3:7]
result_f22 = result_f2[['zero', 'Table Name', 'type', '2nd']].apply(lambda x: ''.join(x), axis=1).to_frame()
result_f3 = result.iloc[:,-1:]
final_result = | pd.concat([result_f11,result_f22,result_f3], axis=1) | pandas.concat |
"""
This module will hold general functions used by main
"""
import argparse # type: ignore
from datetime import datetime # type: ignore
from datetime import timedelta # type: ignore
import pandas as pd # type: ignore
import app.modules.stock as stock # type: ignore
import app.modules.database as database # type: ignore
import app.modules.sql as sql # type: ignore
import app.modules.ml_gmm as ml_gmm # type: ignore
import app.modules.mlflow as mlflow # type: ignore
def get_args():
"""
get_args returns the arguments passed
:return:
"""
parser = argparse.ArgumentParser()
parser.add_argument("task")
args = parser.parse_args()
return args
def init_stocks_db(config: dict):
"""
init_stocks_db used for setting up the database
:param config: config used to access DB
:return:
"""
database_manager: database.DatabaseManager = database.DatabaseManager(config)
database_manager.connect_db()
database_manager.send_sql(sql_query=sql.create_stock_table("stocks"))
database_manager.close_conn()
def upload_this_weeks_stock(config: dict,
stock_symbol: str):
"""
upload_this_weeks_stock is used to download the weeks stock data
:param config: config used to access DB
:param stock_symbol: stock company to download
:return:
"""
start_date: str = (datetime.today() - timedelta(days=7)).strftime("%Y-%m-%d")
stock_data_frame: pd.DataFrame = stock.get_stock(stock.stock_man,
stock_symbol,
start_date)
database_manager: database.DatabaseManager = database.DatabaseManager(config)
database_manager.connect_db()
database_manager.df_to_sql(stock_data_frame, "stocks")
database_manager.close_conn()
def upload_ayear_stock(config: dict,
stock_symbol: str):
"""
upload_ayear_stock is used to download the years stock data
:param config: config used to access DB
:param stock_symbol: stock company to download
:return:
"""
start_date: str = (datetime.today() - timedelta(days=360)).strftime("%Y-%m-%d")
stock_data_frame: pd.DataFrame = stock.get_stock(stock.stock_man,
stock_symbol,
start_date)
database_manager: database.DatabaseManager = database.DatabaseManager(config)
database_manager.connect_db()
database_manager.df_to_sql(stock_data_frame, "stocks")
database_manager.close_conn()
def search_train_gmm_model(config: dict,
table: str):
"""
search_train_gmm_model is used to get all the metrics for gmm models on stock data
:param config: config used to access DB
:param table: table that has stock data
:return:
"""
mlflow_manager = mlflow.MlFlowManager(config["ip"],
"GMM_example")
database_manager: database.DatabaseManager = database.DatabaseManager(config)
database_manager.connect_db()
stocks: list = database_manager.receive_sql_fetchall(sql.select_all_table(table))
database_manager.close_conn()
gmm_ml_manager: ml_gmm.GmmMlManager = ml_gmm.GmmMlManager( | pd.DataFrame(stocks) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 12 13:02:14 2019
@author: ranap
V = l/mu
D = 2 * sigma^2 = (2*l^2) / lambda
Args:
N: number of particles
Return:
dataframe with probability
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.switch_backend('agg')
from scipy.stats import invgauss
import math
import time
import sys
from joblib import Parallel, delayed
def brownian(d,v,sigma,t_bin=0.001):
# simulate brownian motion
pos= 0
t= 1
v_t = v*t_bin
while pos < d:
pos = pos + v_t + (sigma * np.random.randn() * math.sqrt(t_bin))
t+=1
return t * t_bin
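# --- Editor's sanity check (not part of the original script) -----------------
# For Brownian motion with drift v, the mean first-passage time over a distance
# d is d / v (the inverse-Gaussian mean suggested by the header, assuming l
# plays the role of d and V the role of v), so the empirical mean of brownian()
# should land close to that value. Function name and defaults are the editor's.
def _check_brownian_mean(d=1.0, v=1.0, sigma=1.0, n=2000):
    times = [brownian(d, v, sigma) for _ in range(n)]
    return sum(times) / n # expected to be close to d / v = 1.0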
def eq1(t,T):
s = 2
arr =np.sort(t, axis=None)
t_bind=1
t_next=2
while t_next < len(arr):
if(arr[t_next] -arr[t_bind] > T):
s+=1; t_bind = t_next; t_next+=1
else:
t_next+=1
s_arr = np.zeros((len(arr)-1,), dtype=int)
s_arr[s-2] = 1
return s_arr
def sim(N,d,v,sigma,T,tau, no_sim=100000):
# simulate N particle model
s_count = np.zeros(N-1)
for i in range(1,no_sim):
tauT = np.zeros((N,),dtype=float)
tauT[0] = 0
for i in range(1,len(tauT)):
tauT[i] =tauT[i-1]+ np.random.exponential(tau)
t = np.zeros((N,),dtype=float)
for i in range(0,len(t)):
t[i] =tauT[i]+ brownian(d,v,sigma)
t=t-t[0] # making relative to 1st particle
s = eq1(t,T)
s_count +=s
s_prob = s_count/no_sim
res= np.zeros((5+N-1,), dtype=float)
res[0]=d; res[1]=v; res[2]=sigma; res[3]=T; res[4]=tau;
res[5:]=s_prob
return(res)
def test():
N=4; d=1; v=1; sigma=1; T=1; tau=1
start = time.time()
res = sim(N,d,v,sigma,T,tau)
print("result : ", res, " Time: ", time.time()-start)
def main_prl(N):
# run for different parameter combinations
d=1; v=1; sigma=1
t_range = np.arange(0.1,4.01,0.2)
tau_range = np.arange(0.5,4.01,0.2)
Data = np.zeros((len(t_range)*len(tau_range),5+N-1),dtype=float)
res = Parallel(n_jobs=16)(delayed(sim)(N,d,v,sigma,i,j) for j in tau_range for i in t_range)
res = np.array(res)
col_name= ['d', 'v', 'sigma', 'T', 'tau'] + ['S'+ str(i) for i in range(2,N+1)]
Data = | pd.DataFrame(res, columns=col_name) | pandas.DataFrame |
"""
Base tools for handling various kinds of data structures, attaching metadata to
results, and doing data cleaning
"""
from statsmodels.compat.python import lmap
from functools import reduce
import numpy as np
from pandas import DataFrame, Series, isnull, MultiIndex
import statsmodels.tools.data as data_util
from statsmodels.tools.decorators import cache_readonly, cache_writable
from statsmodels.tools.sm_exceptions import MissingDataError
def _asarray_2dcolumns(x):
if np.asarray(x).ndim > 1 and np.asarray(x).squeeze().ndim == 1:
return
def _asarray_2d_null_rows(x):
"""
Makes sure input is an array and is 2d. Makes sure output is 2d. True
indicates a null in the rows of 2d x.
"""
#Have to have the asarrays because isnull does not account for array_like
#input
x = np.asarray(x)
if x.ndim == 1:
x = x[:, None]
return np.any(isnull(x), axis=1)[:, None]
def _nan_rows(*arrs):
"""
Returns a boolean array which is True where any of the rows in any
of the _2d_ arrays in arrs are NaNs. Inputs can be any mixture of Series,
DataFrames or array_like.
"""
if len(arrs) == 1:
arrs += ([[False]],)
def _nan_row_maybe_two_inputs(x, y):
# check for dtype bc dataframe has dtypes
x_is_boolean_array = hasattr(x, 'dtype') and x.dtype == bool and x
return np.logical_or(_asarray_2d_null_rows(x),
(x_is_boolean_array | _asarray_2d_null_rows(y)))
return reduce(_nan_row_maybe_two_inputs, arrs).squeeze()
class ModelData(object):
"""
Class responsible for handling input data and extracting metadata into the
appropriate form
"""
_param_names = None
_cov_names = None
def __init__(self, endog, exog=None, missing='none', hasconst=None,
**kwargs):
if data_util._is_recarray(endog) or data_util._is_recarray(exog):
import warnings
from statsmodels.tools.sm_exceptions import recarray_warning
warnings.warn(recarray_warning, FutureWarning)
if 'design_info' in kwargs:
self.design_info = kwargs.pop('design_info')
if 'formula' in kwargs:
self.formula = kwargs.pop('formula')
if missing != 'none':
arrays, nan_idx = self.handle_missing(endog, exog, missing,
**kwargs)
self.missing_row_idx = nan_idx
self.__dict__.update(arrays) # attach all the data arrays
self.orig_endog = self.endog
self.orig_exog = self.exog
self.endog, self.exog = self._convert_endog_exog(self.endog,
self.exog)
else:
self.__dict__.update(kwargs) # attach the extra arrays anyway
self.orig_endog = endog
self.orig_exog = exog
self.endog, self.exog = self._convert_endog_exog(endog, exog)
self.const_idx = None
self.k_constant = 0
self._handle_constant(hasconst)
self._check_integrity()
self._cache = {}
def __getstate__(self):
from copy import copy
d = copy(self.__dict__)
if "design_info" in d:
del d["design_info"]
d["restore_design_info"] = True
return d
def __setstate__(self, d):
if "restore_design_info" in d:
# NOTE: there may be a more performant way to do this
from patsy import dmatrices, PatsyError
exc = []
try:
data = d['frame']
except KeyError:
data = d['orig_endog'].join(d['orig_exog'])
for depth in [2, 3, 1, 0, 4]: # sequence is a guess where to likely find it
try:
_, design = dmatrices(d['formula'], data, eval_env=depth,
return_type='dataframe')
break
except (NameError, PatsyError) as e:
exc.append(e) # why do I need a reference from outside except block
pass
else:
raise exc[-1]
self.design_info = design.design_info
del d["restore_design_info"]
self.__dict__.update(d)
def _handle_constant(self, hasconst):
if hasconst is False or self.exog is None:
self.k_constant = 0
self.const_idx = None
else:
# detect where the constant is
check_implicit = False
exog_max = np.max(self.exog, axis=0)
if not np.isfinite(exog_max).all():
raise MissingDataError('exog contains inf or nans')
exog_min = np.min(self.exog, axis=0)
const_idx = np.where(exog_max == exog_min)[0].squeeze()
self.k_constant = const_idx.size
if self.k_constant == 1:
if self.exog[:, const_idx].mean() != 0:
self.const_idx = int(const_idx)
else:
# we only have a zero column and no other constant
check_implicit = True
elif self.k_constant > 1:
# we have more than one constant column
# look for ones
values = [] # keep values if we need != 0
for idx in const_idx:
value = self.exog[:, idx].mean()
if value == 1:
self.k_constant = 1
self.const_idx = int(idx)
break
values.append(value)
else:
# we did not break, no column of ones
pos = (np.array(values) != 0)
if pos.any():
# take the first nonzero column
self.k_constant = 1
self.const_idx = int(const_idx[pos.argmax()])
else:
# only zero columns
check_implicit = True
elif self.k_constant == 0:
check_implicit = True
else:
# should not be here
pass
if check_implicit and not hasconst:
# look for implicit constant
# Compute rank of augmented matrix
augmented_exog = np.column_stack(
(np.ones(self.exog.shape[0]), self.exog))
rank_augm = np.linalg.matrix_rank(augmented_exog)
rank_orig = np.linalg.matrix_rank(self.exog)
self.k_constant = int(rank_orig == rank_augm)
self.const_idx = None
elif hasconst:
# Ensure k_constant is 1 any time hasconst is True
# even if one is not found
self.k_constant = 1
@classmethod
def _drop_nans(cls, x, nan_mask):
return x[nan_mask]
@classmethod
def _drop_nans_2d(cls, x, nan_mask):
return x[nan_mask][:, nan_mask]
@classmethod
def handle_missing(cls, endog, exog, missing, **kwargs):
"""
This returns a dictionary with keys endog, exog and the keys of
kwargs. It preserves Nones.
"""
none_array_names = []
# patsy's already dropped NaNs in y/X
missing_idx = kwargs.pop('missing_idx', None)
if missing_idx is not None:
# y, X already handled by patsy. add back in later.
combined = ()
combined_names = []
if exog is None:
none_array_names += ['exog']
elif exog is not None:
combined = (endog, exog)
combined_names = ['endog', 'exog']
else:
combined = (endog,)
combined_names = ['endog']
none_array_names += ['exog']
# deal with other arrays
combined_2d = ()
combined_2d_names = []
if len(kwargs):
for key, value_array in kwargs.items():
if value_array is None or value_array.ndim == 0:
none_array_names += [key]
continue
# grab 1d arrays
if value_array.ndim == 1:
combined += (np.asarray(value_array),)
combined_names += [key]
elif value_array.squeeze().ndim == 1:
combined += (np.asarray(value_array),)
combined_names += [key]
# grab 2d arrays that are _assumed_ to be symmetric
elif value_array.ndim == 2:
combined_2d += (np.asarray(value_array),)
combined_2d_names += [key]
else:
raise ValueError("Arrays with more than 2 dimensions "
"are not yet handled")
if missing_idx is not None:
nan_mask = missing_idx
updated_row_mask = None
if combined: # there were extra arrays not handled by patsy
combined_nans = _nan_rows(*combined)
if combined_nans.shape[0] != nan_mask.shape[0]:
raise ValueError("Shape mismatch between endog/exog "
"and extra arrays given to model.")
# for going back and updated endog/exog
updated_row_mask = combined_nans[~nan_mask]
nan_mask |= combined_nans # for updating extra arrays only
if combined_2d:
combined_2d_nans = _nan_rows(combined_2d)
if combined_2d_nans.shape[0] != nan_mask.shape[0]:
raise ValueError("Shape mismatch between endog/exog "
"and extra 2d arrays given to model.")
if updated_row_mask is not None:
updated_row_mask |= combined_2d_nans[~nan_mask]
else:
updated_row_mask = combined_2d_nans[~nan_mask]
nan_mask |= combined_2d_nans
else:
nan_mask = _nan_rows(*combined)
if combined_2d:
nan_mask = _nan_rows(*(nan_mask[:, None],) + combined_2d)
if not np.any(nan_mask): # no missing do not do anything
combined = dict(zip(combined_names, combined))
if combined_2d:
combined.update(dict(zip(combined_2d_names, combined_2d)))
if none_array_names:
combined.update(dict(zip(none_array_names,
[None] * len(none_array_names))))
if missing_idx is not None:
combined.update({'endog': endog})
if exog is not None:
combined.update({'exog': exog})
return combined, []
elif missing == 'raise':
raise MissingDataError("NaNs were encountered in the data")
elif missing == 'drop':
nan_mask = ~nan_mask
drop_nans = lambda x: cls._drop_nans(x, nan_mask)
drop_nans_2d = lambda x: cls._drop_nans_2d(x, nan_mask)
combined = dict(zip(combined_names, lmap(drop_nans, combined)))
if missing_idx is not None:
if updated_row_mask is not None:
updated_row_mask = ~updated_row_mask
# update endog/exog with this new information
endog = cls._drop_nans(endog, updated_row_mask)
if exog is not None:
exog = cls._drop_nans(exog, updated_row_mask)
combined.update({'endog': endog})
if exog is not None:
combined.update({'exog': exog})
if combined_2d:
combined.update(dict(zip(combined_2d_names,
lmap(drop_nans_2d, combined_2d))))
if none_array_names:
combined.update(dict(zip(none_array_names,
[None] * len(none_array_names))))
return combined, np.where(~nan_mask)[0].tolist()
else:
raise ValueError("missing option %s not understood" % missing)
def _convert_endog_exog(self, endog, exog):
# for consistent outputs if endog is (n,1)
yarr = self._get_yarr(endog)
xarr = None
if exog is not None:
xarr = self._get_xarr(exog)
if xarr.ndim == 1:
xarr = xarr[:, None]
if xarr.ndim != 2:
raise ValueError("exog is not 1d or 2d")
return yarr, xarr
@cache_writable()
def ynames(self):
endog = self.orig_endog
ynames = self._get_names(endog)
if not ynames:
ynames = _make_endog_names(self.endog)
if len(ynames) == 1:
return ynames[0]
else:
return list(ynames)
@cache_writable()
def xnames(self):
exog = self.orig_exog
if exog is not None:
xnames = self._get_names(exog)
if not xnames:
xnames = _make_exog_names(self.exog)
return list(xnames)
return None
@property
def param_names(self):
# for handling names of 'extra' parameters in summary, etc.
return self._param_names or self.xnames
@param_names.setter
def param_names(self, values):
self._param_names = values
@property
def cov_names(self):
"""
Labels for covariance matrices
In multidimensional models, each dimension of a covariance matrix
differs from the number of param_names.
If not set, returns param_names
"""
# for handling names of covariance names in multidimensional models
if self._cov_names is not None:
return self._cov_names
return self.param_names
@cov_names.setter
def cov_names(self, value):
# for handling names of covariance names in multidimensional models
self._cov_names = value
@cache_readonly
def row_labels(self):
exog = self.orig_exog
if exog is not None:
row_labels = self._get_row_labels(exog)
else:
endog = self.orig_endog
row_labels = self._get_row_labels(endog)
return row_labels
def _get_row_labels(self, arr):
return None
def _get_names(self, arr):
if isinstance(arr, DataFrame):
if isinstance(arr.columns, MultiIndex):
# Flatten MultiIndexes into "simple" column names
return ['_'.join((level for level in c if level))
for c in arr.columns]
else:
return list(arr.columns)
elif isinstance(arr, Series):
if arr.name:
return [arr.name]
else:
return
else:
try:
return arr.dtype.names
except AttributeError:
pass
return None
def _get_yarr(self, endog):
if data_util._is_structured_ndarray(endog):
endog = data_util.struct_to_ndarray(endog)
endog = np.asarray(endog)
if len(endog) == 1: # never squeeze to a scalar
if endog.ndim == 1:
return endog
elif endog.ndim > 1:
return np.asarray([endog.squeeze()])
return endog.squeeze()
def _get_xarr(self, exog):
if data_util._is_structured_ndarray(exog):
exog = data_util.struct_to_ndarray(exog)
return np.asarray(exog)
def _check_integrity(self):
if self.exog is not None:
if len(self.exog) != len(self.endog):
raise ValueError("endog and exog matrices are different sizes")
def wrap_output(self, obj, how='columns', names=None):
if how == 'columns':
return self.attach_columns(obj)
elif how == 'rows':
return self.attach_rows(obj)
elif how == 'cov':
return self.attach_cov(obj)
elif how == 'dates':
return self.attach_dates(obj)
elif how == 'columns_eq':
return self.attach_columns_eq(obj)
elif how == 'cov_eq':
return self.attach_cov_eq(obj)
elif how == 'generic_columns':
return self.attach_generic_columns(obj, names)
elif how == 'generic_columns_2d':
return self.attach_generic_columns_2d(obj, names)
elif how == 'ynames':
return self.attach_ynames(obj)
elif how == 'multivariate_confint':
return self.attach_mv_confint(obj)
else:
return obj
def attach_columns(self, result):
return result
def attach_columns_eq(self, result):
return result
def attach_cov(self, result):
return result
def attach_cov_eq(self, result):
return result
def attach_rows(self, result):
return result
def attach_dates(self, result):
return result
def attach_mv_confint(self, result):
return result
def attach_generic_columns(self, result, *args, **kwargs):
return result
def attach_generic_columns_2d(self, result, *args, **kwargs):
return result
def attach_ynames(self, result):
return result
class PatsyData(ModelData):
def _get_names(self, arr):
return arr.design_info.column_names
class PandasData(ModelData):
"""
Data handling class which knows how to reattach pandas metadata to model
results
"""
def _convert_endog_exog(self, endog, exog=None):
#TODO: remove this when we handle dtype systematically
endog = np.asarray(endog)
exog = exog if exog is None else np.asarray(exog)
if endog.dtype == object or exog is not None and exog.dtype == object:
raise ValueError("Pandas data cast to numpy dtype of object. "
"Check input data with np.asarray(data).")
return super(PandasData, self)._convert_endog_exog(endog, exog)
@classmethod
def _drop_nans(cls, x, nan_mask):
if isinstance(x, (Series, DataFrame)):
return x.loc[nan_mask]
else: # extra arguments could be plain ndarrays
return super(PandasData, cls)._drop_nans(x, nan_mask)
@classmethod
def _drop_nans_2d(cls, x, nan_mask):
if isinstance(x, (Series, DataFrame)):
return x.loc[nan_mask].loc[:, nan_mask]
else: # extra arguments could be plain ndarrays
return super(PandasData, cls)._drop_nans_2d(x, nan_mask)
def _check_integrity(self):
endog, exog = self.orig_endog, self.orig_exog
# exog can be None and we could be upcasting one or the other
if (exog is not None and
(hasattr(endog, 'index') and hasattr(exog, 'index')) and
not self.orig_endog.index.equals(self.orig_exog.index)):
raise ValueError("The indices for endog and exog are not aligned")
super(PandasData, self)._check_integrity()
def _get_row_labels(self, arr):
try:
return arr.index
except AttributeError:
# if we've gotten here it's because endog is pandas and
# exog is not, so just return the row labels from endog
return self.orig_endog.index
def attach_generic_columns(self, result, names):
# get the attribute to use
column_names = getattr(self, names, None)
return Series(result, index=column_names)
def attach_generic_columns_2d(self, result, rownames, colnames=None):
colnames = colnames or rownames
rownames = getattr(self, rownames, None)
colnames = getattr(self, colnames, None)
return DataFrame(result, index=rownames, columns=colnames)
def attach_columns(self, result):
# this can either be a 1d array or a scalar
# do not squeeze because it might be a 2d row array
# if it needs a squeeze, the bug is elsewhere
if result.ndim <= 1:
return Series(result, index=self.param_names)
else: # for e.g., confidence intervals
return DataFrame(result, index=self.param_names)
def attach_columns_eq(self, result):
return DataFrame(result, index=self.xnames, columns=self.ynames)
def attach_cov(self, result):
return DataFrame(result, index=self.cov_names, columns=self.cov_names)
def attach_cov_eq(self, result):
return DataFrame(result, index=self.ynames, columns=self.ynames)
def attach_rows(self, result):
# assumes if len(row_labels) > len(result) it's bc it was truncated
# at the front, for AR lags, for example
squeezed = result.squeeze()
k_endog = np.array(self.ynames, ndmin=1).shape[0]
if k_endog > 1 and squeezed.shape == (k_endog,):
squeezed = squeezed[None, :]
# May be zero-dim, for example in the case of forecast one step in tsa
if squeezed.ndim < 2:
return Series(squeezed, index=self.row_labels[-len(result):])
else:
return DataFrame(result, index=self.row_labels[-len(result):],
columns=self.ynames)
def attach_dates(self, result):
squeezed = result.squeeze()
k_endog = np.array(self.ynames, ndmin=1).shape[0]
if k_endog > 1 and squeezed.shape == (k_endog,):
squeezed = np.asarray(squeezed)[None, :]
# May be zero-dim, for example in the case of forecast one step in tsa
if squeezed.ndim < 2:
return Series(squeezed, index=self.predict_dates)
else:
return DataFrame(np.asarray(result),
index=self.predict_dates,
columns=self.ynames)
def attach_mv_confint(self, result):
return DataFrame(result.reshape((-1, 2)),
index=self.cov_names,
columns=['lower', 'upper'])
def attach_ynames(self, result):
squeezed = result.squeeze()
# May be zero-dim, for example in the case of forecast one step in tsa
if squeezed.ndim < 2:
return Series(squeezed, name=self.ynames)
else:
return | DataFrame(result, columns=self.ynames) | pandas.DataFrame |
#both datasets are used in this application
#import matplotlib
#import matplotlib.mlab as mlab
#import matplotlib.gridspec as gridspec
#from scipy.misc import electrocardiogram
#from scipy import stats
import matplotlib.pyplot as plt
from scipy.signal import find_peaks
import pandas as pd
import numpy as np
from scipy.stats import kurtosis, skew
#Functions
#Mean
def media(lista):
return (sum(lista)/len(lista))
def comportamento(s, lista_dif, lista_dif_index, peaks):
for i in range(len(s)-1):
sub = abs(s[i]-s[i+1])
sub_index = abs(peaks[i]-peaks[i+1])
lista_dif.append(sub)
lista_dif_index.append(sub_index)
def peaks(lista):
#distance = sampling frequency
peaks, _ = find_peaks(lista, distance=fs//2)
np.diff(peaks)
return peaks
def num_peaks(lista, peaks):
#number of peaks
num_peaks = np.zeros(len(peaks)-2)
for i in range(len(peaks)-2):
num_peaks[i]=lista[peaks[i]]-lista[peaks[i+1]]
return num_peaks
def plot_comport(lista, peaks):
#plot of peak behavior
s = lista[peaks]
lista_dif = []
lista_dif_index = []
return comportamento(s, lista_dif, lista_dif_index, peaks)
# REFERENCE
# https://media.readthedocs.org/pdf/python-heart-rate-analysis-toolkit/latest/python-heart-rate-analysis-toolkit.pdf
# x = electrocardiogram()#[2000:4000]
data = pd.read_csv('lista2.txt')
data2 = | pd.read_csv('lista3.txt') | pandas.read_csv |
import decimal
import numpy as np
from numpy import iinfo
import pytest
import pandas as pd
from pandas import to_numeric
from pandas.util import testing as tm
class TestToNumeric(object):
def test_empty(self):
# see gh-16302
s = pd.Series([], dtype=object)
res = to_numeric(s)
expected = pd.Series([], dtype=np.int64)
tm.assert_series_equal(res, expected)
# Original issue example
res = to_numeric(s, errors='coerce', downcast='integer')
expected = pd.Series([], dtype=np.int8)
tm.assert_series_equal(res, expected)
def test_series(self):
s = pd.Series(['1', '-3.14', '7'])
res = to_numeric(s)
expected = pd.Series([1, -3.14, 7])
tm.assert_series_equal(res, expected)
s = pd.Series(['1', '-3.14', 7])
res = to_numeric(s)
tm.assert_series_equal(res, expected)
def test_series_numeric(self):
s = pd.Series([1, 3, 4, 5], index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
s = pd.Series([1., 3., 4., 5.], index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
# bool is regarded as numeric
s = pd.Series([True, False, True, True],
index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
def test_error(self):
s = pd.Series([1, -3.14, 'apple'])
msg = 'Unable to parse string "apple" at position 2'
with pytest.raises(ValueError, match=msg):
to_numeric(s, errors='raise')
res = to_numeric(s, errors='ignore')
expected = pd.Series([1, -3.14, 'apple'])
tm.assert_series_equal(res, expected)
res = to_numeric(s, errors='coerce')
expected = pd.Series([1, -3.14, np.nan])
tm.assert_series_equal(res, expected)
s = pd.Series(['orange', 1, -3.14, 'apple'])
msg = 'Unable to parse string "orange" at position 0'
with pytest.raises(ValueError, match=msg):
to_numeric(s, errors='raise')
def test_error_seen_bool(self):
s = pd.Series([True, False, 'apple'])
msg = 'Unable to parse string "apple" at position 2'
with pytest.raises(ValueError, match=msg):
to_numeric(s, errors='raise')
res = to_numeric(s, errors='ignore')
expected = pd.Series([True, False, 'apple'])
tm.assert_series_equal(res, expected)
# coerces to float
res = to_numeric(s, errors='coerce')
expected = pd.Series([1., 0., np.nan])
tm.assert_series_equal(res, expected)
def test_list(self):
s = ['1', '-3.14', '7']
res = to_numeric(s)
expected = np.array([1, -3.14, 7])
tm.assert_numpy_array_equal(res, expected)
def test_list_numeric(self):
s = [1, 3, 4, 5]
res = to_numeric(s)
tm.assert_numpy_array_equal(res, np.array(s, dtype=np.int64))
s = [1., 3., 4., 5.]
res = to_numeric(s)
tm.assert_numpy_array_equal(res, np.array(s))
# bool is regarded as numeric
s = [True, False, True, True]
res = to_numeric(s)
tm.assert_numpy_array_equal(res, np.array(s))
def test_numeric(self):
s = pd.Series([1, -3.14, 7], dtype='O')
res = to_numeric(s)
expected = pd.Series([1, -3.14, 7])
tm.assert_series_equal(res, expected)
s = pd.Series([1, -3.14, 7])
res = to_numeric(s)
tm.assert_series_equal(res, expected)
# GH 14827
df = pd.DataFrame(dict(
a=[1.2, decimal.Decimal(3.14), decimal.Decimal("infinity"), '0.1'],
b=[1.0, 2.0, 3.0, 4.0],
))
expected = pd.DataFrame(dict(
a=[1.2, 3.14, np.inf, 0.1],
b=[1.0, 2.0, 3.0, 4.0],
))
# Test to_numeric over one column
df_copy = df.copy()
df_copy['a'] = df_copy['a'].apply(to_numeric)
tm.assert_frame_equal(df_copy, expected)
# Test to_numeric over multiple columns
df_copy = df.copy()
df_copy[['a', 'b']] = df_copy[['a', 'b']].apply(to_numeric)
tm.assert_frame_equal(df_copy, expected)
def test_numeric_lists_and_arrays(self):
# Test to_numeric with embedded lists and arrays
df = pd.DataFrame(dict(
a=[[decimal.Decimal(3.14), 1.0], decimal.Decimal(1.6), 0.1]
))
df['a'] = df['a'].apply(to_numeric)
expected = pd.DataFrame(dict(
a=[[3.14, 1.0], 1.6, 0.1],
))
tm.assert_frame_equal(df, expected)
df = pd.DataFrame(dict(
a=[np.array([decimal.Decimal(3.14), 1.0]), 0.1]
))
df['a'] = df['a'].apply(to_numeric)
expected = pd.DataFrame(dict(
a=[[3.14, 1.0], 0.1],
))
tm.assert_frame_equal(df, expected)
def test_all_nan(self):
s = pd.Series(['a', 'b', 'c'])
res = to_numeric(s, errors='coerce')
expected = pd.Series([np.nan, np.nan, np.nan])
tm.assert_series_equal(res, expected)
@pytest.mark.parametrize("errors", [None, "ignore", "raise", "coerce"])
def test_type_check(self, errors):
# see gh-11776
df = pd.DataFrame({"a": [1, -3.14, 7], "b": ["4", "5", "6"]})
kwargs = dict(errors=errors) if errors is not None else dict()
error_ctx = pytest.raises(TypeError, match="1-d array")
with error_ctx:
to_numeric(df, **kwargs)
def test_scalar(self):
assert pd.to_numeric(1) == 1
assert pd.to_numeric(1.1) == 1.1
assert pd.to_numeric('1') == 1
assert pd.to_numeric('1.1') == 1.1
with pytest.raises(ValueError):
to_numeric('XX', errors='raise')
assert to_numeric('XX', errors='ignore') == 'XX'
assert np.isnan(to_numeric('XX', errors='coerce'))
def test_numeric_dtypes(self):
idx = pd.Index([1, 2, 3], name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, idx)
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(idx, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, idx.values)
idx = pd.Index([1., np.nan, 3., np.nan], name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, idx)
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(idx, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, idx.values)
def test_str(self):
idx = pd.Index(['1', '2', '3'], name='xxx')
exp = np.array([1, 2, 3], dtype='int64')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(exp, name='xxx'))
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(exp, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, exp)
idx = pd.Index(['1.5', '2.7', '3.4'], name='xxx')
exp = np.array([1.5, 2.7, 3.4])
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(exp, name='xxx'))
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(exp, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, exp)
def test_datetime_like(self, tz_naive_fixture):
idx = pd.date_range("20130101", periods=3,
tz=tz_naive_fixture, name="xxx")
res = pd.to_numeric(idx)
tm.assert_index_equal(res, | pd.Index(idx.asi8, name="xxx") | pandas.Index |
import numpy as np
import pandas as pd
class PricingInstance(object):
"""Instance of the pricing problem"""
def __init__(self, rp, rn, Xp, Xn, v0, z0):
self.rp = rp
self.rn = rn
self.Xp = Xp
self.Xn = Xn
self.v0 = v0
self.z0 = z0
def eval_singletons(self, lambda1):
"""Evaluate all singleton solutions (adding one literal)"""
self.Rp = np.dot(self.rp, self.Xp)
self.Rn = np.dot(self.rn, self.Xn)
self.v1 = self.v0 - self.Rp - self.Rn + lambda1
self.v1 = pd.Series(self.v1, index=self.Xp.columns)
def compute_LB(self, lambda1):
"""Compute lower bound on higher-order solutions"""
Rp0 = self.rp.sum()
if np.ndim(lambda1):
self.LB = np.array([])
else:
self.LB = np.minimum(np.cumsum(np.sort(self.Rp)[::-1])[1:], Rp0)
self.LB += np.sort(self.Rn)[-2::-1]
self.LB -= lambda1 * np.arange(2, len(self.Rp)+1)
self.LB = self.v0 - self.LB
# Lower bound specific to each singleton solution
self.LB1 = self.v1 + self.Rp - Rp0 + lambda1
if len(self.LB):
self.LB1[self.LB1 < self.LB.min()] = self.LB.min()
def beam_search_K1(r, X, lambda0, lambda1, UB=0, D=10, B=5, wLB=0.5, eps=1e-6, stopEarly=True):
"""Beam search to generate SINGLE SOLUTION (K = 1) to pricing problem
Problem parameters:
r = cost vector (residuals)
X = binary feature DataFrame
lambda0 = fixed cost of a term
lambda1 = cost per literal
Algorithm parameters:
UB = initial upper bound on value of solutions
D = maximum degree
B = beam width
wLB = weight on lower bound in evaluating nodes
eps = numerical tolerance on comparisons
stopEarly = stop after current degree once solution is found
"""
# Initialize output
vOut = np.array([])
zOut = pd.Series(index=X.columns)
# Initialize queue with root instance
# Separate data according to positive and negative residuals
rp = r[r > 0]
rn = r[r < 0]
Xp = 1 - X.loc[r > 0]
Xn = 1 - X.loc[r < 0]
instCurr = [PricingInstance(rp, rn, Xp, Xn, r.sum() + lambda0, pd.Series(0, index=zOut.index))]
# Iterate over increasing degree while queue is non-empty
deg = 0
while (not len(vOut) or not stopEarly) and len(instCurr) and deg < D:
deg += 1
# Initialize list of children to process
vNext = np.array([])
vNextMax = np.inf
zNext = pd.DataFrame([], index=X.columns)
idxInstNext = np.array([], dtype=int)
idxFeatNext = np.array([])
# Process instances in queue
for (idxInst, inst) in enumerate(instCurr):
# Evaluate all singleton solutions
inst.eval_singletons(lambda1)
# Solutions that improve on current output
vCand = inst.v1[inst.v1 < UB - eps]
if len(vCand):
# Update output with best of these solutions
idxMin = vCand.idxmin()
UB = vCand[idxMin]
vOut = np.array([UB])
zOut = inst.z0.copy()
zOut[idxMin] = 1
# Compute lower bounds on higher-degree solutions
inst.compute_LB(lambda1)
# Evaluate children using weighted average of their costs and LBs
vChild = (1 - wLB) * inst.v1 + wLB * inst.LB1
# Best children with potential to improve on current output and current candidates (allow for duplicate removal)
vChild = vChild[(inst.LB1 < UB - eps) & (vChild < vNextMax - eps)].sort_values()[:B+idxInst]
if len(vChild):
# Feature indicators of these best children
zChild = pd.DataFrame(zOut.index.values[:,np.newaxis] == vChild.index.values, index=zOut.index).astype(int)
zChild = zChild.add(inst.z0, axis=0)
# Append to current candidates
vNext = np.append(vNext, vChild.values)
zNext = pd.concat([zNext, zChild], axis=1, ignore_index=True)
idxInstNext = np.append(idxInstNext, np.full(B+idxInst, idxInst))
idxFeatNext = np.append(idxFeatNext, vChild.index.values)
# Remove duplicates
_, idxUniq = np.unique(zNext, return_index=True, axis=1)
vNext = vNext[idxUniq]
zNext = zNext.iloc[:,idxUniq]
idxInstNext = idxInstNext[idxUniq]
idxFeatNext = idxFeatNext[idxUniq]
# Update candidates
idxBest = np.argsort(vNext)[:B]
vNext = vNext[idxBest]
if len(vNext):
vNextMax = vNext[-1]
zNext = zNext.iloc[:,idxBest]
zNext.columns = range(zNext.shape[1])
idxInstNext = idxInstNext[idxBest]
idxFeatNext = idxFeatNext[idxBest]
# Instances to process in next iteration
instNext = []
for (idxInst, i, idxz) in zip(idxInstNext, idxFeatNext, zNext):
# Create pricing instance
# Remove covered rows
rowKeep = instCurr[idxInst].Xp[i] == 0
rp = instCurr[idxInst].rp[rowKeep]
Xp = instCurr[idxInst].Xp.loc[rowKeep]
rowKeep = instCurr[idxInst].Xn[i] == 0
rn = instCurr[idxInst].rn[rowKeep]
Xn = instCurr[idxInst].Xn.loc[rowKeep]
# Remove redundant features
if type(Xp.columns) is pd.MultiIndex:
colKeep = pd.Series(Xp.columns.get_level_values(0) != i[0], index=Xp.columns)
if i[1] == '<=':
thresh = Xp[i[0]].columns.get_level_values(1).to_series().replace('NaN', np.nan).values
colKeep[i[0]] = (Xp[i[0]].columns.get_level_values(0) == '>') & (thresh < i[2])
elif i[1] == '>':
thresh = Xp[i[0]].columns.get_level_values(1).to_series().replace('NaN', np.nan).values
colKeep[i[0]] = (Xp[i[0]].columns.get_level_values(0) == '<=') & (thresh > i[2])
elif i[1] == '!=':
colKeep[i[0]] = (Xp[i[0]].columns.get_level_values(0) == '!=') & (Xp[i[0]].columns.get_level_values(1) != i[2])
Xp = Xp.loc[:, colKeep]
Xn = Xn.loc[:, colKeep]
instNext.append(PricingInstance(rp, rn, Xp, Xn, instCurr[idxInst].v1[i], zNext[idxz]))
instCurr = instNext
# Conjunctions corresponding to solutions
if zOut.count():
zOut = pd.DataFrame(zOut)
else:
zOut = pd.DataFrame(index=X.columns)
aOut = 1 - (np.dot(1 - X, zOut) > 0)
return vOut, zOut, aOut
def beam_search(r, X, lambda0, lambda1, K=1, UB=0, D=10, B=5, wLB=0.5, eps=1e-6, stopEarly=False):
"""Beam search to generate solutions to pricing problem
Problem parameters:
r = cost vector (residuals)
X = binary feature DataFrame
lambda0 = fixed cost of a term
lambda1 = cost per literal
Algorithm parameters:
K = maximum number of solutions returned
UB = initial upper bound on value of solutions
D = maximum degree
B = beam width
wLB = weight on lower bound in evaluating nodes
eps = numerical tolerance on comparisons
stopEarly = stop after current degree once solution is found
"""
# Initialize output
vOut = np.array([])
zOut = pd.DataFrame(index=X.columns)
# Copy the data (note: no grouping of redundant rows is actually performed here)
X2 = X.copy()
r2 = r
# Initialize queue with root instance
# Separate data according to positive and negative residuals
rp = r2[r2 > 0]
rn = r2[r2 < 0]
Xp = 1 - X2.loc[r2 > 0]
Xn = 1 - X2.loc[r2 < 0]
instCurr = [PricingInstance(rp, rn, Xp, Xn, r2.sum() + lambda0, pd.Series(0, index=zOut.index))]
# Iterate over increasing degree while queue is non-empty
deg = 0
while (not len(vOut) or not stopEarly) and len(instCurr) and deg < D:
deg += 1
# Initialize list of children to process
vNext = np.array([])
vNextMax = np.inf
zNext = pd.DataFrame([], index=X2.columns)
idxInstNext = np.array([], dtype=int)
idxFeatNext = np.array([])
# Process instances in queue
for (idxInst, inst) in enumerate(instCurr):
# Evaluate all singleton solutions
inst.eval_singletons(lambda1)
# Best solutions that also improve on current output (allow for duplicate removal)
vCand = inst.v1[inst.v1 < UB - eps].sort_values()[:K+B]
if len(vCand):
zCand = pd.DataFrame(zOut.index.values[:,np.newaxis] == vCand.index.values, index=zOut.index).astype(int)
zCand = zCand.add(inst.z0, axis=0)
# Append to current output
vOut = np.append(vOut, vCand.values)
zOut = | pd.concat([zOut, zCand], axis=1, ignore_index=True) | pandas.concat |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import numpy as np
import pandas as pd
import datetime as dt
import netCDF4 as cdf
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
import cartopy
cartopy.config['data_dir'] = '/data/project/cartopy/'
import cartopy.crs as ccrs
import cartopy.feature as cfeature
from cartopy.feature import NaturalEarthFeature, LAND, COASTLINE, LAKES
import json
import requests
#Import ICOS tools:
from icoscp.cpb.dobj import Dobj
from icoscp.sparql.runsparql import RunSparql
from icoscp.sparql import sparqls
my_home = os.getenv('HOME')
sys.path.insert(0,my_home+'/new_jupyter/modules')
from extra_sparqls import get_station_class, get_icos_stations_atc_samplingheight #, atc_station_tracer_query
# paths --- changed for new JupyterHub instance
path_stiltweb = '/data/stiltweb/'
path_stilt = '/data/stilt/'
path_edgar = '/data/stilt/Emissions/'
#path_plots = './plots/'
#------------------------------------------------------------------------------------------------------------------
def read_icos_data(f_icos,tracer,flag=True):
if (len(f_icos)>0):
df = Dobj(f_icos.dobj.iloc[0]).getColumns()
df.set_index('TIMESTAMP',inplace=True)
if flag:
if (tracer.lower() == 'mto'):
value_cols =[x.replace('-Flag','') for x in df.columns[df.columns.str.contains('Flag')]]
for x in value_cols:
df[x].loc[df[x+'-Flag']!='O']=np.nan
else:
df[tracer.lower()].loc[df['Flag']!='O']=np.nan
df['Stdev'].loc[df['Flag']!='O']=np.nan
else:
if (tracer.lower() != 'mto'):
df[tracer.lower()].loc[df[tracer.lower()]<0.0]=np.nan
df['Stdev'].loc[df['Stdev']<0.0]=np.nan
else:
df = | pd.DataFrame(None) | pandas.DataFrame |
import numpy as np
import pandas as pd
from datetime import datetime
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import classification_report, confusion_matrix
loan_data = pd.read_csv('C:/Users/PC1/Desktop/GitHub/Decision trees and random forests/LoanStats3a.csv',
header=1, skip_blank_lines=False)
loan_data = loan_data.dropna(thresh=500, axis=1)
df_loan = loan_data.filter(['purpose', 'funded_amnt_inv', 'int_rate',
'installment', 'annual_inc', 'dti', 'earliest_cr_line',
'revol_bal', 'revol_util', 'inq_last_6mths', 'loan_status'], axis=1, )
# separate data by policy code -> 1 = permitted loan, 0 = no loan
df1_loan = np.split(df_loan, df_loan[df_loan.isnull().all(axis=1)].index)
df1 = df1_loan[0]
df2 = df1_loan[3]
df1['policy_code'] = 1
df2['policy_code'] = 0
df_loan = df1.append(df2[1:])
# clean column data
df_loan['fully_paid'] = np.where(df_loan['loan_status'] == 'Fully Paid', 1, 0)
df_loan['log.annual.inc'] = np.log(df_loan['annual_inc'])
df_loan['int.rate'] = df_loan['int_rate'].str.rstrip('%').astype('float') / 100.0
df_loan['revol.util'] = df_loan['int_rate'].str.rstrip('%').astype('float') / 100.0
df_loan.drop(['int_rate', 'revol_util', 'annual_inc', 'loan_status'], axis=1, inplace=True)
# Determine the number of days individuals have had a credit line
end_date = datetime(2011, 12, 31)
df_loan['earliest_cr_line'] = pd.to_datetime(df_loan['earliest_cr_line'])
df_loan['earliest_cr_line'] = (end_date - df_loan['earliest_cr_line']).dt.days
# df_loan = df_loan.fillna({'desc':'none'})
df_loan = df_loan.fillna(0)
## Only 182 rows contain the FICO score so the description column will not be used
# df_loan['fico']= np.where(df_loan['desc'].str.contains('fico', case=False), 1,0)
# print(len(df_loan[df_loan['fico']==1]))
# sns.pairplot(df_loan, hue='policy_code')
# plt.show()
# Debt to income for the two credit.policy outcomes
plt.figure(figsize=(10, 6))
df_loan[df_loan['policy_code'] == 1]['dti'].hist(bins=30, label='Loan approved: Yes', alpha=0.5)
df_loan[df_loan['policy_code'] == 0]['dti'].hist(color='red', bins=30, label='Loan approved: No', alpha=0.5)
plt.legend()
plt.xlabel('Debt to income ratio')
plt.ylabel('Individuals')
plt.title('Lending club - loan approvals by debt to income ratio')
plt.show()
# Countplot showing counts of loans by purpose and hue fully paid. **
plt.figure(figsize=(20, 6))
c_plot = sns.countplot(data=df_loan, x='purpose', hue='policy_code', palette='RdBu')
plt.xlabel('Purpose', fontweight='bold')
plt.ylabel('Count', fontweight='bold')
plt.title('Loan status by purpose', fontweight='bold')
L = plt.legend()
L.get_texts()[0].set_text('Not repaid')
L.get_texts()[1].set_text('Repaid')
plt.tight_layout()
# Compare log of debt to income ratio to the interest rate
sns.jointplot(data=df_loan, x='log.annual.inc', y='int.rate', color='indigo')
plt.show()
# Compare the trend between fully_paid and policy_code.
sns.set_style('darkgrid')
sns.lmplot(data=df_loan, x='log.annual.inc', y='int.rate', hue='policy_code',
legend_out=True, col='fully_paid', palette='Set1')
plt.show()
'''
A decision tree or random forest model would likely be able to predict the policy_code
since there is a little separation between groups (ex. sns.pairplot(df_loan))
'''
# Generate separate columns of dummy variables from the purpose column
cat_feats = ['purpose']
df_loan = | pd.get_dummies(df_loan, columns=cat_feats, drop_first=True) | pandas.get_dummies |
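# --- Editor's sketch: one possible continuation of the loan script above -----
# (not present in the original). It assumes the dummy-encoded df_loan produced
# by the pd.get_dummies() call and the sklearn imports made at the top of that
# script; the split ratio and random_state are arbitrary illustrative choices.
X = df_loan.drop(['policy_code', 'fully_paid'], axis=1)
y = df_loan['policy_code']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=101)
dtree = DecisionTreeClassifier()
dtree.fit(X_train, y_train)
print(classification_report(y_test, dtree.predict(X_test)))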
import logging
import os
import math
import glob
import pyfastaq
import pysam
import pandas as pd
from Bio import pairwise2, SeqIO
from cluster_vcf_records import vcf_clusterer, vcf_file_read
from minos import dependencies, dnadiff, plots, utils
class Error (Exception): pass
class EvaluateRecall:
'''Check which variants in a truth VCF are recovered (recalled) by a query VCF.'''
def __init__(self, truth_vcf_file, truth_vcf_ref, query_vcf_file, query_vcf_ref, outprefix, flank_length=31, merge_length=None, filter_and_cluster_vcf=True, discard_ref_calls=True, allow_flank_mismatches=True, exclude_regions_bed_file=None, max_soft_clipped=3):
self.truth_vcf_file = os.path.abspath(truth_vcf_file)
self.truth_vcf_ref = os.path.abspath(truth_vcf_ref)
self.query_vcf_file = os.path.abspath(query_vcf_file)
self.query_vcf_ref = os.path.abspath(query_vcf_ref)
self.sam_file_out = os.path.abspath(outprefix + '.sam')
self.seqs_out_truth = os.path.abspath(outprefix + '.truth.fa')
self.filtered_query_vcf = os.path.abspath(outprefix + '.query.filter.vcf')
self.filtered_truth_vcf = os.path.abspath(outprefix + '.truth.filter.vcf')
self.clustered_vcf_query = os.path.abspath(outprefix + '.query.filter.cluster.vcf')
self.clustered_vcf_truth = os.path.abspath(outprefix + '.truth.filter.cluster.vcf')
self.seqs_out_query = os.path.abspath(outprefix + '.query.fa')
self.sam_summary = os.path.abspath(outprefix + '.summary.tsv')
self.stats_out = os.path.abspath(outprefix + '.stats.tsv')
self.gt_conf_hist_out = os.path.abspath(outprefix + '.gt_conf_hist.tsv')
self.flank_length = flank_length
self.merge_length = flank_length if merge_length is None else merge_length
self.filter_and_cluster_vcf = filter_and_cluster_vcf
self.discard_ref_calls = discard_ref_calls
self.allow_flank_mismatches = allow_flank_mismatches
if self.filter_and_cluster_vcf:
self.vcf_to_check_truth = self.clustered_vcf_truth
self.vcf_to_check_query = self.clustered_vcf_query
else:
self.vcf_to_check_truth = self.truth_vcf_file
self.vcf_to_check_query = self.query_vcf_file
self.exclude_regions = EvaluateRecall._load_exclude_regions_bed_file(exclude_regions_bed_file)
self.max_soft_clipped = max_soft_clipped
self.number_ns = 5
@classmethod
def _load_exclude_regions_bed_file(cls, infile):
regions = {}
if infile is not None:
with open(infile) as f:
for line in f:
fields = line.rstrip().split('\t')
if fields[0] not in regions:
regions[fields[0]] = []
start = int(fields[1])
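# BED end coordinates are exclusive, so subtract 1 to store an inclusive end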
end = int(fields[2]) - 1
regions[fields[0]].append(pyfastaq.intervals.Interval(start, end))
for ref_name in regions:
pyfastaq.intervals.merge_overlapping_in_list(regions[ref_name])
return regions
@classmethod
def _interval_intersects_an_interval_in_list(cls, interval, interval_list):
# This could be faster by doing something like a binary search.
# But we're looking for points in intervals, so fiddly to implement.
# Not expecting a long interval list, so just do a simple check
# from start to end for now
i = 0
while i < len(interval_list) and interval.start > interval_list[i].end:
i += 1
return i < len(interval_list) and interval.intersects(interval_list[i])
@classmethod
def _filter_vcf_for_clustering(cls, infile, outfile, discard_ref_calls=True):
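# Editor's summary of the filtering below: skip MISMAPPED_UNPLACEABLE and
# non-genotyped records, keep only homozygous calls (optionally dropping
# hom-ref ones), collapse multi-allelic ALTs to the called allele, normalise
# haploid GTs to the diploid '0/0'/'1/1' form, and derive GT_CONF from GL or
# SupportFraction when it is not already present.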
header_lines, vcf_records = vcf_file_read.vcf_file_to_dict(infile, sort=True, homozygous_only=False, remove_asterisk_alts=True, remove_useless_start_nucleotides=True)
with open(outfile, 'w') as f:
print(*header_lines, sep='\n', file=f)
for ref_name in vcf_records:
for vcf_record in vcf_records[ref_name]:
if vcf_record.FILTER == 'MISMAPPED_UNPLACEABLE':
continue
if vcf_record.FORMAT is None or 'GT' not in vcf_record.FORMAT:
logging.warning('No GT in vcf record:' + str(vcf_record))
continue
if vcf_record.REF in [".", ""]:
continue
genotype = vcf_record.FORMAT['GT']
genotypes = genotype.split('/')
called_alleles = set(genotypes)
if len(called_alleles) != 1 or (discard_ref_calls and called_alleles == {'0'}) or '.' in called_alleles:
continue
if len(vcf_record.ALT) > 1:
if called_alleles != {'0'}:
vcf_record.set_format_key_value('GT', '1/1')
try:
vcf_record.ALT = [vcf_record.ALT[int(genotypes[0]) - 1]]
except:
raise Error('BAD VCf line:' + str(vcf_record))
else:
vcf_record.set_format_key_value('GT', '0/0')
vcf_record.ALT = [vcf_record.ALT[0]]
if vcf_record.ALT[0] in [".",""]:
continue
if vcf_record.FORMAT['GT'] == '0':
vcf_record.FORMAT['GT'] = '0/0'
elif vcf_record.FORMAT['GT'] == '1':
vcf_record.FORMAT['GT'] = '1/1'
if 'GL' in vcf_record.FORMAT.keys() and 'GT_CONF' not in vcf_record.FORMAT.keys():
likelihoods = vcf_record.FORMAT['GL'].split(',')
assert(len(likelihoods) > 2)
if called_alleles == {'0'}:
vcf_record.set_format_key_value('GT_CONF',str(float(likelihoods[0]) - float(likelihoods[1])))
else:
vcf_record.set_format_key_value('GT_CONF', str(float(likelihoods[int(genotypes[0])]) - float(likelihoods[0])))
if 'SupportFraction' in vcf_record.INFO.keys() and 'GT_CONF' not in vcf_record.FORMAT.keys():
vcf_record.set_format_key_value('GT_CONF',
str(float(vcf_record.INFO['SupportFraction'])*100))
print(vcf_record, file=f)
@classmethod
def _write_vars_plus_flanks_to_fasta(cls, outfile, vcf_records, ref_seqs, flank_length, ref_only=False, number_ns=0):
'''Given a dict of vcf records made by vcf_file_read.vcf_file_to_dict(),
and its corresponding file of reference sequences, writes a new fasta file
of each ref seq and inferred variant sequence plus flank_length nucleotides added to
its start and end. Calls each sequence:
ref_name.start_position.same_position_index.vcf_list_index.allele_number
where allele numbers are in the same order as in the VCF, with ref seq = allele 0.'''
prev_ref_name = None
prev_ref_pos = None
j = 0
with open(outfile, 'w') as f:
for ref_name in sorted(vcf_records):
for i, vcf_record in enumerate(vcf_records[ref_name]):
start_position, alleles = vcf_record.inferred_var_seqs_plus_flanks(ref_seqs[ref_name], flank_length)
for allele_index, allele_seq in enumerate(alleles):
if not ref_only or allele_index == 0:
seq_name = '.'.join([ref_name, str(start_position + 1), str(j), str(i), str(allele_index)])
allele_seq = allele_seq.replace('.','')
for n in range(number_ns):
allele_seq = "N" + allele_seq + "N"
print('>' + seq_name, allele_seq, sep='\n', file=f)
if prev_ref_name == ref_name and prev_ref_pos == start_position:
j += 1
else:
j = 0
prev_ref_name = ref_name
prev_ref_pos = start_position
@classmethod
def _map_seqs_to_seqs(cls, seqs_file_ref, seqs_file_query, outfile):
'''Map seqs_file_query to seqs_file_ref using BWA (index, aln, samse).
Output is SAM file written to outfile'''
bwa_binary = dependencies.find_binary('bwa')
command = ' '.join([
bwa_binary, 'index',
seqs_file_ref,
])
utils.syscall(command)
command = ' '.join([
bwa_binary, 'aln',
seqs_file_ref,
seqs_file_query,
'>', outfile + ".tmp",
])
utils.syscall(command)
command = ' '.join([
bwa_binary, 'samse',
seqs_file_ref,
outfile + ".tmp",
seqs_file_query,
'>', outfile,
])
utils.syscall(command)
#os.unlink(outfile + ".tmp")
@classmethod
def _check_if_sam_match_is_good(cls, sam_record, flank_length, query_sequence=None, allow_mismatches=True, max_soft_clipped=3):
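# Editor's summary: returns 'Unmapped', 'Bad_mismatches' (only when mismatches
# are disallowed), 'Bad_soft_clipped', 'Bad_allele_mismatch', or 'Good', based
# on the bases covering the allele plus 1bp of flank on each side.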
logging.debug(f'Checking SAM: {sam_record}')
if sam_record.is_unmapped:
return 'Unmapped'
if not allow_mismatches:
try:
nm = sam_record.get_tag('NM')
except:
raise Error('No NM tag found in sam record:' + str(sam_record))
all_mapped = len(sam_record.cigartuples) == 1 and sam_record.cigartuples[0][0] == 0
if all_mapped and nm == 0:
logging.debug('SAM record passed no mismatches allowed check')
return 'Good'
else:
logging.debug('SAM record failed no mismatches allowed check')
return 'Bad_mismatches'
# don't allow too many soft clipped bases
if (sam_record.cigartuples[0][0] == 4 and sam_record.cigartuples[0][1] > max_soft_clipped) \
or (sam_record.cigartuples[-1][0] == 4 and sam_record.cigartuples[-1][1] > max_soft_clipped):
logging.debug('SAM record failed soft clipping check')
return 'Bad_soft_clipped'
if query_sequence is None:
query_sequence = sam_record.query_sequence
assert query_sequence is not None
# if the query is short, which happens when the variant we
# are checking is too near the start or end of the ref sequence
if len(query_sequence) < 2 * flank_length + 1:
# This is an edge case. We don't really know which part
# of the query seq we're looking for, so guess
length_diff = 2 * flank_length - len(query_sequence)
if sam_record.query_alignment_start < 5:
alt_seq_end = len(query_sequence) - flank_length - 1
alt_seq_start = min(alt_seq_end, flank_length - length_diff)
else:
alt_seq_start = flank_length
alt_seq_end = max(alt_seq_start, length_diff + len(query_sequence) - flank_length - 1)
else:
alt_seq_start = flank_length
alt_seq_end = len(query_sequence) - flank_length - 1
aligned_pairs = sam_record.get_aligned_pairs(with_seq=True)
logging.debug(f'aligned_pairs: {aligned_pairs}')
wanted_aligned_pairs = []
current_pos = 0
i = 0
while i < len(query_sequence):
if aligned_pairs[i][0] is None:
if alt_seq_start - 1 <= current_pos <= alt_seq_end + 1:
wanted_aligned_pairs.append(aligned_pairs[i])
elif current_pos > alt_seq_end:
break
else:
current_pos = aligned_pairs[i][0]
if alt_seq_start - 1 <= current_pos <= alt_seq_end + 1:
wanted_aligned_pairs.append(aligned_pairs[i])
i += 1
logging.debug(f'wanted_aligned_pairs: {wanted_aligned_pairs}')
assert len(wanted_aligned_pairs) > 0
for pair in wanted_aligned_pairs:
if None in pair or query_sequence[pair[0]].upper() != pair[2].upper():
logging.debug('SAM record failed because mismatch in allele sequence plus 1bp either side')
return 'Bad_allele_mismatch'
logging.debug('SAM record passed all checks')
return 'Good'
@classmethod
def _index_vcf(cls, vcffile):
'''Index VCF file'''
bgzip_binary = dependencies.find_binary('bgzip')
command = ' '.join([
bgzip_binary,
'-c',
vcffile,
'>',
vcffile + ".gz",
])
utils.syscall(command)
tabix_binary = dependencies.find_binary('tabix')
command = ' '.join([
tabix_binary,
'-p',
'vcf',
vcffile + ".gz",
])
utils.syscall(command)
@classmethod
def _parse_sam_file_and_vcf(cls, samfile, query_vcf_file, flank_length, allow_mismatches, exclude_regions=None, max_soft_clipped=3, number_ns=0):
if exclude_regions is None:
exclude_regions = {}
found = []
match_flag = []
correct_allele = []
gt_conf = []
allele = []
samfile_handle = pysam.AlignmentFile(samfile, "r")
sam_previous_record_name = None
for sam_record in samfile_handle.fetch(until_eof=True):
if sam_record.query_name == sam_previous_record_name:
continue
sam_previous_record_name = sam_record.query_name
found_conf = False
found_allele = False
# see if excluded region in bed file
ref, start, ref_num, var_num, allele_num = sam_record.query_name.rsplit('.', maxsplit=5)
start = int(start) + flank_length
exclude = False
for ref_name in exclude_regions.keys():
end = int(start) + 1
interval = pyfastaq.intervals.Interval(start, end)
exclude = EvaluateRecall._interval_intersects_an_interval_in_list(interval,
exclude_regions[ref_name])
if exclude:
found.append('Exclude')
gt_conf.append(0)
allele.append('0')
continue
match = EvaluateRecall._check_if_sam_match_is_good(sam_record,
flank_length,
query_sequence=sam_record.query_sequence,
allow_mismatches=allow_mismatches,
max_soft_clipped=max_soft_clipped)
alignment_start = str(sam_record).split("\t")[3]
match_flag.append(match)
if match == 'Good':
logging.debug('SAM record is a good match')
logging.debug('SAM record reference is %s' %sam_record.reference_name)
ref_name, expected_start, vcf_pos_index, vcf_record_index, allele_index = sam_record.reference_name.rsplit('.', maxsplit=4)
vcf_reader = pysam.VariantFile(query_vcf_file)
vcf_interval_start = int(expected_start) + int(alignment_start) + flank_length - 2 - number_ns
vcf_interval_end = int(expected_start) + int(alignment_start) + flank_length - number_ns
logging.debug('Find VCF records matching ref %s in interval [%i,%i]' %(ref_name, vcf_interval_start, vcf_interval_end))
for i, vcf_record in enumerate(vcf_reader.fetch(ref_name, vcf_interval_start, vcf_interval_end)):
if i == int(vcf_pos_index):
sample_name = vcf_record.samples.keys()[0]
if 'GT' in vcf_record.format.keys() and len(set(vcf_record.samples[sample_name]['GT'])) == 1:
if int(allele_index) == int(vcf_record.samples[sample_name]['GT'][0]):
found.append('1')
allele.append(str(allele_index))
correct_allele.append('1')
found_allele = True
if 'GT_CONF' in vcf_record.format.keys():
gt_conf.append(int(float(vcf_record.samples[sample_name]['GT_CONF'])))
found_conf = True
if not found_allele:
found.append('0')
allele.append('0')
correct_allele.append('0')
if not found_conf:
gt_conf.append(0)
assert len(found) == len(gt_conf)
assert len(found) == len(allele)
assert len(found) == len(match_flag)
assert len(found) == len(correct_allele)
return found, gt_conf, allele, match_flag, correct_allele
@classmethod
def _parse_sam_files(cls, truth_vcf_file, samfile, query_vcf_file, outfile, flank_length, allow_mismatches=True, exclude_regions=None, max_soft_clipped=3, number_ns=0):
'''Input is the original dnadiff snps file of sites we are searching for
and 2 SAM files made by _map_seqs_to_seqs(), which show mappings of snp sites
from the dnadiff snps file to the vcf (i.e. searches if the VCF contains a record
with the appropriate sequence.
Creates a tsv detailing whether the snp difference could be detected and at what
GT_CONF threshold.
'''
header_lines, vcf_records = vcf_file_read.vcf_file_to_dict(truth_vcf_file, sort=True,
homozygous_only=False,
remove_asterisk_alts=True,
remove_useless_start_nucleotides=True)
id = []
ref = []
alt = []
for ref_name in vcf_records:
for record in vcf_records[ref_name]:
id.append(record.POS)
ref.append(record.REF)
alt.append(record.ALT[0])
query_found, query_conf, query_allele, query_match_flag, query_allele_flag = EvaluateRecall._parse_sam_file_and_vcf(samfile, query_vcf_file,
flank_length, allow_mismatches,
exclude_regions, max_soft_clipped, number_ns)
assert len(id) == len(query_found)
out_df = pd.DataFrame({'id': id,
'ref': ref,
'alt': alt,
'query_found': query_found,
'query_conf': query_conf,
'query_allele': query_allele,
'query_match_flag': query_match_flag,
'query_allele_correct': query_allele_flag})
out_df.to_csv(outfile, sep='\t')
@classmethod
def _gather_stats(cls, tsv_file):
stats = {x: 0 for x in ['total', 'found_vars', 'missed_vars', 'excluded_vars']}
gt_conf_hist = {}
snps = | pd.read_csv(tsv_file, sep='\t') | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Zerodha Kite Connect - candlestick pattern scanner
@author: <NAME> (http://rasuquant.com/wp/)
"""
from kiteconnect import KiteConnect
import pandas as pd
import datetime as dt
import os
import time
import numpy as np
from technicalta import *
#cwd = os.chdir("D:\\Udemy\\Zerodha KiteConnect API\\1_account_authorization")
apikey = '<KEY>'
#generate trading session
'''access_token = open("access_token.txt",'r').read()
key_secret = open("api_key.txt",'r').read().split()
kite = KiteConnect(api_key=key_secret[0])
kite.set_access_token(access_token)
#get dump of all NSE instruments
instrument_dump = kite.instruments("NSE")
instrument_df = pd.DataFrame(instrument_dump)
'''
def instrumentLookup(instrument_df,symbol):
"""Looks up instrument token for a given script from instrument dump"""
try:
return instrument_df[instrument_df.tradingsymbol==symbol].instrument_token.values[0]
except:
return -1
def fetchOHLC(ticker,interval,duration):
"""extracts historical data and outputs in the form of dataframe"""
instrument = instrumentLookup(instrument_df,ticker)
data = pd.DataFrame(kite.historical_data(instrument,dt.date.today()-dt.timedelta(duration), dt.date.today(),interval))
data.set_index("date",inplace=True)
return data
def doji(ohlc_df):
"""returns dataframe with doji candle column"""
df = ohlc_df.copy()
    avg_candle_size = abs(df["close"] - df["open"]).median()
df["doji"] = abs(df["close"] - df["open"]) <= (0.05 * avg_candle_size)
return df
def maru_bozu(ohlc_df):
"""returns dataframe with maru bozu candle column"""
df = ohlc_df.copy()
avg_candle_size = abs(df["close"] - df["open"]).median()
df["h-c"] = df["high"]-df["close"]
df["l-o"] = df["low"]-df["open"]
df["h-o"] = df["high"]-df["open"]
df["l-c"] = df["low"]-df["close"]
df["maru_bozu"] = np.where((df["close"] - df["open"] > 2*avg_candle_size) & \
(df[["h-c","l-o"]].max(axis=1) < 0.005*avg_candle_size),"maru_bozu_green",
np.where((df["open"] - df["close"] > 2*avg_candle_size) & \
(abs(df[["h-o","l-c"]]).max(axis=1) < 0.005*avg_candle_size),"maru_bozu_red",False))
df.drop(["h-c","l-o","h-o","l-c"],axis=1,inplace=True)
return df
def hammer(ohlc_df):
"""returns dataframe with hammer candle column"""
df = ohlc_df.copy()
df["hammer"] = (((df["high"] - df["low"])>3*(df["open"] - df["close"])) & \
((df["close"] - df["low"])/(.001 + df["high"] - df["low"]) > 0.6) & \
((df["open"] - df["low"])/(.001 + df["high"] - df["low"]) > 0.6)) & \
(abs(df["close"] - df["open"]) > 0.1* (df["high"] - df["low"]))
return df
def shooting_star(ohlc_df):
"""returns dataframe with shooting star candle column"""
df = ohlc_df.copy()
df["sstar"] = (((df["high"] - df["low"])>3*(df["open"] - df["close"])) & \
((df["high"] - df["close"])/(.001 + df["high"] - df["low"]) > 0.6) & \
((df["high"] - df["open"])/(.001 + df["high"] - df["low"]) > 0.6)) & \
(abs(df["close"] - df["open"]) > 0.1* (df["high"] - df["low"]))
return df
def levels(ohlc_day):
"""returns pivot point and support/resistance levels"""
high = round(ohlc_day["high"][-1],2)
low = round(ohlc_day["low"][-1],2)
close = round(ohlc_day["close"][-1],2)
pivot = round((high + low + close)/3,2)
r1 = round((2*pivot - low),2)
r2 = round((pivot + (high - low)),2)
r3 = round((high + 2*(pivot - low)),2)
s1 = round((2*pivot - high),2)
s2 = round((pivot - (high - low)),2)
s3 = round((low - 2*(high - pivot)),2)
return (pivot,r1,r2,r3,s1,s2,s3)
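# A quick worked example of the floor-pivot arithmetic above (illustrative numbers only,
# not taken from any real instrument): with high=110, low=90, close=100 the function
# returns pivot=100.0, r1=110.0, r2=120.0, r3=130.0, s1=90.0, s2=80.0, s3=70.0, e.g.
#   sample_day = pd.DataFrame({"high": [110.0], "low": [90.0], "close": [100.0]},
#                             index=pd.to_datetime(["2020-01-01"]))
#   levels(sample_day)  # -> (100.0, 110.0, 120.0, 130.0, 90.0, 80.0, 70.0)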
def trend(ohlc_df,n):
"function to assess the trend by analyzing each candle"
df = ohlc_df.copy()
df["up"] = np.where(df["low"]>=df["low"].shift(1),1,0)
df["dn"] = np.where(df["high"]<=df["high"].shift(1),1,0)
if df["close"][-1] > df["open"][-1]:
if df["up"][-1*n:].sum() >= 0.7*n:
return "uptrend"
elif df["open"][-1] > df["close"][-1]:
if df["dn"][-1*n:].sum() >= 0.7*n:
return "downtrend"
else:
return None
def res_sup(ohlc_df,ohlc_day):
"""calculates closest resistance and support levels for a given candle"""
level = ((ohlc_df["close"][-1] + ohlc_df["open"][-1])/2 + (ohlc_df["high"][-1] + ohlc_df["low"][-1])/2)/2
p,r1,r2,r3,s1,s2,s3 = levels(ohlc_day)
l_r1=level-r1
l_r2=level-r2
l_r3=level-r3
l_p=level-p
l_s1=level-s1
l_s2=level-s2
l_s3=level-s3
lev_ser = pd.Series([l_p,l_r1,l_r2,l_r3,l_s1,l_s2,l_s3],index=["p","r1","r2","r3","s1","s2","s3"])
sup = lev_ser[lev_ser>0].idxmin()
res = lev_ser[lev_ser>0].idxmax()
return (eval('{}'.format(res)), eval('{}'.format(sup)))
def candle_type(ohlc_df):
"""returns the candle type of the last candle of an OHLC DF"""
'''ohlc_df['open']=int(ohlc_df['open'])
ohlc_df['close']=int(ohlc_df['close'])
ohlc_df['high']=int(ohlc_df['high'])
ohlc_df['low']=int(ohlc_df['low'])'''
candle = None
if doji(ohlc_df)["doji"][-1] == True:
candle = "doji"
if maru_bozu(ohlc_df)["maru_bozu"][-1] == "maru_bozu_green":
candle = "maru_bozu_green"
if maru_bozu(ohlc_df)["maru_bozu"][-1] == "maru_bozu_red":
candle = "maru_bozu_red"
if shooting_star(ohlc_df)["sstar"][-1] == True:
candle = "shooting_star"
if hammer(ohlc_df)["hammer"][-1] == True:
candle = "hammer"
return candle
def candle_pattern(ohlc_df,ohlc_day):
"""returns the candle pattern identified"""
pattern = None
signi = "low"
avg_candle_size = abs(ohlc_df["close"] - ohlc_df["open"]).median()
sup, res = res_sup(ohlc_df,ohlc_day)
if (sup - 1.5*avg_candle_size) < ohlc_df["close"][-1] < (sup + 1.5*avg_candle_size):
signi = "HIGH"
if (res - 1.5*avg_candle_size) < ohlc_df["close"][-1] < (res + 1.5*avg_candle_size):
signi = "HIGH"
if candle_type(ohlc_df) == 'doji' \
and ohlc_df["close"][-1] > ohlc_df["close"][-2] \
and ohlc_df["close"][-1] > ohlc_df["open"][-1]:
pattern = "doji_bullish"
if candle_type(ohlc_df) == 'doji' \
and ohlc_df["close"][-1] < ohlc_df["close"][-2] \
and ohlc_df["close"][-1] < ohlc_df["open"][-1]:
pattern = "doji_bearish"
if candle_type(ohlc_df) == "maru_bozu_green":
pattern = "maru_bozu_bullish"
if candle_type(ohlc_df) == "maru_bozu_red":
pattern = "maru_bozu_bearish"
if trend(ohlc_df.iloc[:-1,:],7) == "uptrend" and candle_type(ohlc_df) == "hammer":
pattern = "hanging_man_bearish"
if trend(ohlc_df.iloc[:-1,:],7) == "downtrend" and candle_type(ohlc_df) == "hammer":
pattern = "hammer_bullish"
if trend(ohlc_df.iloc[:-1,:],7) == "uptrend" and candle_type(ohlc_df) == "shooting_star":
pattern = "shooting_star_bearish"
if trend(ohlc_df.iloc[:-1,:],7) == "uptrend" \
and candle_type(ohlc_df) == "doji" \
and ohlc_df["high"][-1] < ohlc_df["close"][-2] \
and ohlc_df["low"][-1] > ohlc_df["open"][-2]:
pattern = "harami_cross_bearish"
if trend(ohlc_df.iloc[:-1,:],7) == "downtrend" \
and candle_type(ohlc_df) == "doji" \
and ohlc_df["high"][-1] < ohlc_df["open"][-2] \
and ohlc_df["low"][-1] > ohlc_df["close"][-2]:
pattern = "harami_cross_bullish"
if trend(ohlc_df.iloc[:-1,:],7) == "uptrend" \
and candle_type(ohlc_df) != "doji" \
and ohlc_df["open"][-1] > ohlc_df["high"][-2] \
and ohlc_df["close"][-1] < ohlc_df["low"][-2]:
pattern = "engulfing_bearish"
if trend(ohlc_df.iloc[:-1,:],7) == "downtrend" \
and candle_type(ohlc_df) != "doji" \
and ohlc_df["close"][-1] > ohlc_df["high"][-2] \
and ohlc_df["open"][-1] < ohlc_df["low"][-2]:
pattern = "engulfing_bullish"
return "Significance - {}, Pattern - {}".format(signi,pattern)
##############################################################################################
tickers = ["ZEEL","WIPRO","VEDL","ULTRACEMCO","UPL","TITAN","TECHM","TATASTEEL",
"TATAMOTORS","TCS","SUNPHARMA","SBIN","SHREECEM","RELIANCE","POWERGRID",
"ONGC","NESTLEIND","NTPC","MARUTI","M&M","LT","KOTAKBANK","JSWSTEEL","INFY",
"INDUSINDBK","IOC","ITC","ICICIBANK","HDFC","HINDUNILVR","HINDALCO",
"HEROMOTOCO","HDFCBANK","HCLTECH","GRASIM","GAIL","EICHERMOT","DRREDDY",
"COALINDIA","CIPLA","BRITANNIA","INFRATEL","BHARTIARTL","BPCL","BAJAJFINSV",
"BAJFINANCE","BAJAJ-AUTO","AXISBANK","ASIANPAINT","ADANIPORTS","IDEA",
"MCDOWELL-N","UBL","NIACL","SIEMENS","SRTRANSFIN","SBILIFE","PNB",
"PGHH","PFC","PEL","PIDILITIND","PETRONET","PAGEIND","OFSS","NMDC","NHPC",
"MOTHERSUMI","MARICO","LUPIN","L&TFH","INDIGO","IBULHSGFIN","ICICIPRULI",
"ICICIGI","HINDZINC","HINDPETRO","HAVELLS","HDFCLIFE","HDFCAMC","GODREJCP",
"GICRE","DIVISLAB","DABUR","DLF","CONCOR","COLPAL","CADILAHC","BOSCHLTD",
"BIOCON","BERGEPAINT","BANKBARODA","BANDHANBNK","BAJAJHLDNG","DMART",
"AUROPHARMA","ASHOKLEY","AMBUJACEM","ADANITRANS","ACC",
"WHIRLPOOL","WABCOINDIA","VOLTAS","VINATIORGA","VBL","VARROC","VGUARD",
"UNIONBANK","UCOBANK","TRENT","TORNTPOWER","TORNTPHARM","THERMAX","RAMCOCEM",
"TATAPOWER","TATACONSUM","TVSMOTOR","TTKPRESTIG","SYNGENE","SYMPHONY",
"SUPREMEIND","SUNDRMFAST","SUNDARMFIN","SUNTV","STRTECH","SAIL","SOLARINDS",
"SHRIRAMCIT","SCHAEFFLER","SANOFI","SRF","SKFINDIA","SJVN","RELAXO",
"RAJESHEXPO","RECLTD","RBLBANK","QUESS","PRESTIGE","POLYCAB","PHOENIXLTD",
"PFIZER","PNBHOUSING","PIIND","OIL","OBEROIRLTY","NAM-INDIA","NATIONALUM",
"NLCINDIA","NBCC","NATCOPHARM","MUTHOOTFIN","MPHASIS","MOTILALOFS","MINDTREE",
"MFSL","MRPL","MANAPPURAM","MAHINDCIE","M&MFIN","MGL","MRF","LTI","LICHSGFIN",
"LTTS","KANSAINER","KRBL","JUBILANT","JUBLFOOD","JINDALSTEL","JSWENERGY",
"IPCALAB","NAUKRI","IGL","IOB","INDHOTEL","INDIANB","IBVENTURES","IDFCFIRSTB",
"IDBI","ISEC","HUDCO","HONAUT","HAL","HEXAWARE","HATSUN","HEG","GSPL",
"GUJGASLTD","GRAPHITE","GODREJPROP","GODREJIND","GODREJAGRO","GLENMARK",
"GLAXO","GILLETTE","GMRINFRA","FRETAIL","FCONSUMER","FORTIS","FEDERALBNK",
"EXIDEIND","ESCORTS","ERIS","ENGINERSIN","ENDURANCE","EMAMILTD","EDELWEISS",
"EIHOTEL","LALPATHLAB","DALBHARAT","CUMMINSIND","CROMPTON","COROMANDEL","CUB",
"CHOLAFIN","CHOLAHLDNG","CENTRALBK","CASTROLIND","CANBK","CRISIL","CESC",
"BBTC","BLUEDART","BHEL","BHARATFORG","BEL","BAYERCROP","BATAINDIA",
"BANKINDIA","BALKRISIND","ATUL","ASTRAL","APOLLOTYRE","APOLLOHOSP",
"AMARAJABAT","ALKEM","APLLTD","AJANTPHARM","ABFRL","ABCAPITAL","ADANIPOWER",
"ADANIGREEN","ADANIGAS","ABBOTINDIA","AAVAS","AARTIIND","AUBANK","AIAENG","3MINDIA"]
def main():
for ticker in tickers:
try:
ohlc = fetchOHLC(ticker, '5minute',5)
ohlc_day = fetchOHLC(ticker, 'day',30)
ohlc_day = ohlc_day.iloc[:-1,:]
cp = candle_pattern(ohlc,ohlc_day)
print(ticker, ": ",cp)
except:
print("skipping for ",ticker)
'''
# Continuous execution
starttime=time.time()
timeout = time.time() + 60*60*1 # 60 seconds times 60 meaning the script will run for 1 hr
while time.time() <= timeout:
try:
print("passthrough at ",time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))
main()
time.sleep(300 - ((time.time() - starttime) % 300.0)) # 300 second interval between each new execution
except KeyboardInterrupt:
print('\n\nKeyboard exception received. Exiting.')
exit()'''
from pprint import pprint
def AlphaData_fxintraday(frombase,to,interval):
import requests
import json
from pprint import pprint
global apikey
url="https://www.alphavantage.co/query?function=FX_INTRADAY&from_symbol={}&to_symbol={}&interval={}min&apikey={}".format(frombase,to,interval,apikey)
data=requests.get(url).json()
#pprint(dict(data['Time Series FX ({}min)'.format(interval)]))#['2020-07-31 20:20:00'])#['4. close'])
import pandas as pd
try:
if data:
data=data['Time Series FX ({}min)'.format(interval)]
df=pd.DataFrame(data).T
df['open']=df['1. open']
df['high']=df['2. high']
df['low']=df['3. low']
df['close']=df['4. close']
df=df.drop(['1. open','2. high','3. low', '4. close'], axis=1)
return df#data['Time Series FX ({}min)'.format(interval)]
except:
print("An exception occurred")
frombase=['EUR','USD','GBP','AUD','EUR']
to=['USD','JPY','CAD','CNY','CHF','HKD','GBP','KRW']
'''
for j in frombase:
for i in to:
pprint('{}/{} in process'.format(i,j))
data=AlphaData_intraday(i,j,60)
pprint('{}/{} Done'.format(i,j))
time.sleep(30)
'''
def AlphaData_fxdaily(frombase,to):
import requests
import json
from pprint import pprint
global apikey
url="https://www.alphavantage.co/query?function=FX_DAILY&from_symbol={}&to_symbol={}&apikey={}".format(frombase,to,apikey)
#url="https://www.alphavantage.co/query?function=FX_INTRADAY&from_symbol={}&to_symbol={}&interval={}min&apikey={}".format(frombase,to,interval,apikey)
data=requests.get(url).json()
#pprint(dict(data['Time Series FX ({}min)'.format(interval)]))#['2020-07-31 20:20:00'])#['4. close'])
import pandas as pd
try:
if data:
data=data['Time Series FX (Daily)']
df=pd.DataFrame(data).T
df['open']=df['1. open']
df['high']=df['2. high']
df['low']=df['3. low']
df['close']=df['4. close']
df=df.drop(['1. open','2. high','3. low', '4. close'], axis=1)
return df#data['Time Series FX ({}min)'.format(interval)]
except:
print("An exception occurred")
'''
for j in frombase:
for i in to:
pprint('{}/{} in process'.format(i,j))
dataintra=AlphaData_intraday(i,j,5)
datadaily=AlphaData_daily(i,j)
pprint(dataintra)
if len(dataintra) > 0:
if len(datadaily) > 0 :
pprint(candle_type(dataintra))
#cp = candle_pattern(dataintra,datadaily)
pprint('{}/{} Done'.format(i,j))
time.sleep(5)'''
'''
for j in frombase:
for i in to:
pprint('{}/{} in process'.format(i,j))
data=AlphaData_daily(i,j)
pprint('{}/{} Done'.format(i,j))
time.sleep(5)
'''
def AlphaData_intraday(symbol,interval):
import requests
import json
from pprint import pprint
global apikey
url="https://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY&symbol={}&interval={}min&apikey={}".format(symbol,interval,apikey)
data=requests.get(url).json()
#pprint(dict(data['Time Series FX ({}min)'.format(interval)]))#['2020-07-31 20:20:00'])#['4. close'])
import pandas as pd
try:
if data:
data=data['Time Series ({}min)'.format(interval)]
df=pd.DataFrame(data).T
df['open']=df['1. open']
df['high']=df['2. high']
df['low']=df['3. low']
df['close']=df['4. close']
            df['volume']=df['5. volume']
df=df.drop(['1. open','2. high','3. low', '4. close','5. volume'], axis=1)
df['open']=pd.to_numeric(df['open'])
df['high']=pd.to_numeric(df['high'])
df['low']=pd.to_numeric(df['low'])
df['close']=pd.to_numeric(df['close'])
df['volume']=pd.to_numeric(df['volume'])
return df#data['Time Series FX ({}min)'.format(interval)]
except:
print("An exception occurred")
def AlphaData_daily(symbol):
import requests
import json
from pprint import pprint
global apikey
url="https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol={}&apikey={}".format(symbol,apikey)
#url="https://www.alphavantage.co/query?function=FX_INTRADAY&from_symbol={}&to_symbol={}&interval={}min&apikey={}".format(frombase,to,interval,apikey)
data=requests.get(url).json()
#pprint(dict(data['Time Series FX ({}min)'.format(interval)]))#['2020-07-31 20:20:00'])#['4. close'])
import pandas as pd
try:
if data:
data=data['Time Series (Daily)']
df=pd.DataFrame(data).T
df['open']=df['1. open']
df['high']=df['2. high']
df['low']=df['3. low']
df['close']=df['4. close']
df['volume']=df['5. volume']
df=df.drop(['1. open','2. high','3. low', '4. close','5. volume'], axis=1)
df['open']=pd.to_numeric(df['open'])
df['high']=pd.to_numeric(df['high'])
df['low']=pd.to_numeric(df['low'])
df['close']=pd.to_numeric(df['close'])
df['volume']=pd.to_numeric(df['volume'])
return df#data['Time Series FX ({}min)'.format(interval)]
except:
print("An exception occurred")
''''
for i in to:
pprint('{}/{} in process'.format(i,j))
dataintra=AlphaData_intraday(i,5)
datadaily=AlphaData_daily(i)
pprint(dataintra)
if len(dataintra) > 0:
if len(datadaily) > 0 :
pprint(candle_type(dataintra))
#cp = candle_pattern(dataintra,datadaily)
pprint('{}/{} Done'.format(i,j))
time.sleep(5)'''
def main():
for ticker in tickers:
try:
ohlc = fetchOHLC(ticker, '5minute',5)
ohlc_day = fetchOHLC(ticker, 'day',30)
ohlc_day = ohlc_day.iloc[:-1,:]
cp = candle_pattern(ohlc,ohlc_day)
print(ticker, ": ",cp)
except:
print("skipping for ",ticker)
ticks=['atvi','adbe','amd','alxn','algn','goog','googl','amzn','amgn','adi','anss','aapl','amat','asml','adsk','adp','bidu','biib','bmrn','bkng','avgo','cdns','cdw','cern','chtr','chkp','ctas','csco','ctxs','ctsh','cmcsa','cprt','cost','csx','dxcm','docu','dltr','ebay','ea','exc','expe','fb','fast','fisv','fox','foxa','gild','idxx','ilmn','incy','intc','intu','isrg','jd','klac','lrcx','lbtya','lbtyk','lulu','mar','mxim','meli','mchp','mu','msft','mrna','mdlz','mnst','ntap','ntes','nflx','nvda','nxpi','orly','pcar','payx','pypl','pep','qcom','regn','rost','sgen','siri','swks','splk','sbux','snps','tmus','ttwo','tsla','txn','khc','tcom','ulta','vrsn','vrsk','vrtx','wba','wdc','wday','xel','xlnx','zm']
patterns=['Two Crows',
'Three Black Crows',
'Three Inside Up/Down',
'Three-Line Strike',
'Three Outside Up/Down',
'Three Stars In The South',
'Three Advancing White Soldiers',
'Abandoned Baby',
'Advance Block',
'Belt-hold',
'Breakaway',
'Closing Marubozu',
'Concealing Baby Swallow',
'Counterattack',
'Dark Cloud Cover',
'Doji',
'Doji Star',
'Dragonfly Doji',
'Engulfing Pattern',
'Evening Doji Star',
'Evening Star',
'Up/Down-gap side-by-side white lines',
'Gravestone Doji',
'Hammer',
'Hanging Man',
'Harami Pattern',
'Harami Cross Pattern',
'High-Wave Candle',
'Hikkake Pattern',
'Modified Hikkake Pattern',
'Homing Pigeon',
'Identical Three Crows',
'In-Neck Pattern',
'Inverted Hammer',
'Kicking',
'Kicking - bull/bear',
'Ladder Bottom',
'Long Legged Doji',
'Long Line Candle',
'Marubozu',
'Matching Low',
'Mat Hold',
'Morning Doji Star',
'Morning Star',
'On-Neck Pattern',
'Piercing Pattern',
'Rickshaw Man',
'Rising/Falling Three Methods',
'Separating Lines',
'Shooting Star',
'Short Line Candle',
'Spinning Top',
'Stalled Pattern',
'Stick Sandwich',
'Takuri',
'Tasuki Gap',
'Thrusting Pattern',
'Tristar Pattern',
'Unique 3 River',
'Upside Gap Two Crows',
'Upside/Downside Gap Three Methods']
def texterconversion(text):
tex=text.replace('/','').replace('-','_').replace(' ','_').replace('(','').replace(')','')
return tex
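# For example (names taken from the patterns list above), texterconversion('Three Inside Up/Down')
# returns 'Three_Inside_UpDown' and texterconversion('Kicking - bull/bear') returns
# 'Kicking___bullbear', which is why technical_lib() below branches on keys like 'Kicking___bullbear'.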
def technical_lib(technical,df):
open=df['open']
high=df['high']
low=df['low']
close=df['close']
if technical == 'Two_Crows':
tech=Two_Crows(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'Three_Black_Crows':
tech=Three_Black_Crows(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'Three_Inside_UpDown':
tech=Three_Inside_UpDown(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'Three_Line_Strike':
tech=Three_Line_Strike(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'Three_Outside_UpDown':
tech=Three_Outside_UpDown(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'Three_Stars_In_The_South':
tech=Three_Stars_In_The_South(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'Three_Advancing_White_Soldiers':
tech=Three_Advancing_White_Soldiers(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'Abandoned_Baby':
tech=Abandoned_Baby(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'Advance_Block':
tech=Advance_Block(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'Belt_hold':
tech=Belt_hold(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'Breakaway':
tech=Breakaway(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'Closing_Marubozu':
tech=Closing_Marubozu(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'Concealing_Baby_Swallow':
tech=Concealing_Baby_Swallow(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'Counterattack':
tech=Counterattack(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'Dark_Cloud_Cover':
tech=Dark_Cloud_Cover(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'Doji':
tech=Doji(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'Doji_Star':
tech=Doji_Star(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'Dragonfly_Doji':
tech=Dragonfly_Doji(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'Engulfing_Pattern':
tech=Engulfing_Pattern(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'Evening_Doji_Star':
tech=Evening_Doji_Star(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'Evening_Star':
tech=Evening_Star(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'UpDown_gap_side_by_side_white_lines':
tech=UpDown_gap_side_by_side_white_lines(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'Gravestone_Doji':
tech=Gravestone_Doji(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'Hammer':
tech=Hammer(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'Hanging_Man':
tech=Hanging_Man(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'Harami_Pattern':
tech=Harami_Pattern(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'Harami_Cross_Pattern':
tech=Harami_Cross_Pattern(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'High_Wave_Candle':
tech=High_Wave_Candle(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'Hikkake_Pattern':
tech=Hikkake_Pattern(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'Modified_Hikkake_Pattern':
tech=Modified_Hikkake_Pattern(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'Homing_Pigeon':
tech=Homing_Pigeon(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'Identical_Three_Crows':
tech=Identical_Three_Crows(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'In_Neck_Pattern':
tech=In_Neck_Pattern(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'Inverted_Hammer':
tech=Inverted_Hammer(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'Kicking':
tech=Kicking(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'Kicking___bullbear':
tech=Kicking___bullbear(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'Ladder_Bottom':
tech=Ladder_Bottom(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'Long_Legged_Doji':
tech=Long_Legged_Doji(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'Long_Line_Candle':
tech=Long_Line_Candle(open,high,low,close)
tech= | pd.DataFrame(tech) | pandas.DataFrame |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Library to execute an exploratory data analysis (EDA). It is an approach to
analyzing data sets to summarize their main characteristics, often with visual
methods. Primarily EDA is for seeing what the data can tell us beyond the
formal modeling or hypothesis testing task.
@author: ucaiado
Created on 10/20/2016
"""
###########################################
# Suppress matplotlib user warnings
# Necessary for newer version of matplotlib
try:
import warnings
from IPython import get_ipython
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
from collections import defaultdict
import json
import numpy as np
import pandas as pd
import StringIO
warnings.filterwarnings('ignore', category=UserWarning,
module='matplotlib')
# Display inline matplotlib plots with IPython
get_ipython().run_line_magic('matplotlib', 'inline')
# aesthetics
sns.set_palette('deep', desat=.6)
sns.set_context(rc={'figure.figsize': (8, 4)})
sns.set_style('whitegrid')
sns.set_palette(sns.color_palette('Set2', 10))
# loading style sheet
get_ipython().run_cell('from IPython.core.display import HTML')
get_ipython().run_cell('HTML(open("ipython_style.css").read())')
except:
pass
###########################################
'''
Begin help functions
'''
def func_estimator(x):
'''
    pseudo estimator to be used by pointplot
'''
return x[0]
'''
End help functions
'''
def read_logs(i_desired_trial, s_fname):
'''
Return a dictionary with information for the passed log file and trial and
the number of trades in the main instrument of the strategy
    :param i_desired_trial: integer. the trial ID to collect data
    :param s_fname: string. the name of the log file analyzed
'''
with open(s_fname) as fr:
        # initiate the returned dictionary and other control variables
d_rtn = {'pnl': defaultdict(dict),
'position': defaultdict(dict),
'duration': defaultdict(dict),
'mid': defaultdict(dict)}
f_reward = 0.
f_count_step = 0
last_reward = 0.
i_trial = 0
i_trades = 0
for idx, row in enumerate(fr):
if row == '\n':
continue
# extract desired information
# count the number of trials
if ' New Trial will start!' in row:
i_trial += 1
f_count_step = 0
f_reward = 0
elif '.update():' in row and i_trial == i_desired_trial:
s_aux = row.strip().split(';')[1]
s_x = row.split('time = ')[1].split(',')[0]
s_date_all = s_x
s_x = s_date_all[:-7]
s_date = s_x
ts_date_all = pd.to_datetime(s_date_all)
ts_date = pd.to_datetime(s_date)
last_reward = float(s_aux.split('reward = ')[1].split(',')[0])
f_duration = float(s_aux.split('duration = ')[1].split(',')[0])
f_reward += last_reward
f_count_step += 1.
# extract some data
d_rtn['duration'][i_trial][ts_date_all] = f_duration
if ', position = ' in s_aux:
s_pos = s_aux.split(', position = ')[1].split('}')[0][1:]
s_pos = s_pos.replace("'", "")
l_pos = [(a.strip(), float(b)) for a, b in
[s.split(': ')for s in s_pos.split(',')]]
d_rtn['position'][i_trial][ts_date_all] = dict(l_pos)
if ', pnl = ' in s_aux:
s_action = s_aux.split(', pnl = ')[1].split(',')[0]
f_aux = float(s_action)
d_rtn['pnl'][i_trial][ts_date_all] = f_aux
if 'crossed_prices' in s_aux or 'correction_by_trade' in s_aux:
i_trades += 1
if ', inputs = ' in s_aux:
s_mid = s_aux.split(', inputs = ')[1].split("{'midPrice':")
s_mid = s_mid[1][1:].split('}}')[0]
s_mid = s_mid.replace("'", "")[1:]
l_pos = [(a.strip(), float(b)) for a, b in
[s.split(': ')for s in s_mid.split(',')]]
d_rtn['mid'][i_trial][ts_date_all] = dict(l_pos)
# finish the loop as soon as the trial is analyzed
if i_trial > i_desired_trial:
break
return d_rtn, i_trades
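# Example usage (the log file name is only a placeholder; pass whatever log the simulation
# produced): d_logs, i_trades = read_logs(1, 'log/sim_agent.log')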
def read_logs2(i_desired_trial, s_fname):
'''
Return a dictionary with information for the passed log file and trial and
the number of trades in the main instrument of the strategy
    :param i_desired_trial: integer. the trial ID to collect data
    :param s_fname: string. the name of the log file analyzed
'''
with open(s_fname) as fr:
        # initiate the returned dictionary and other control variables
d_rtn = {'pnl': defaultdict(dict),
'position': defaultdict(dict),
'duration': defaultdict(dict),
'mid': defaultdict(dict)}
f_reward = 0.
f_count_step = 0
last_reward = 0.
i_trial = 0
i_trades = 0
for idx, row in enumerate(fr):
if row == '\n':
continue
# extract desired information
# count the number of trials
if ' New Trial will start!' in row:
i_trial += 1
f_count_step = 0
f_reward = 0
elif '.update():' in row and i_trial == i_desired_trial:
s_aux = row.strip().split(';')[1]
s_x = row.split('time = ')[1].split(',')[0]
s_date_all = s_x
s_x = s_date_all[:-7]
s_date = s_x
ts_date_all = pd.to_datetime(s_date_all)
ts_date = pd.to_datetime(s_date)
last_reward = float(s_aux.split('reward = ')[1].split(',')[0])
f_duration = float(s_aux.split('duration = ')[1].split(',')[0])
f_reward += last_reward
f_count_step += 1.
# extract some data
d_rtn['duration'][i_trial][ts_date_all] = f_duration
if ', position = ' in s_aux:
s_pos = s_aux.split(', position = ')[1].split('}')[0][1:]
s_pos = s_pos.replace("'", "")
l_pos = [(a.strip(), float(b)) for a, b in
[s.split(': ')for s in s_pos.split(',')]]
d_rtn['position'][i_trial][ts_date_all] = dict(l_pos)
if ', pnl = ' in s_aux:
s_action = s_aux.split(', pnl = ')[1].split(',')[0]
f_aux = float(s_action)
d_rtn['pnl'][i_trial][ts_date_all] = f_aux
if 'crossed_prices' in s_aux or 'correction_by_trade' in s_aux:
i_trades += 1
if ', inputs = ' in s_aux:
s_mid = s_aux.split(', inputs = ')[1].split("{'midPrice':")
s_mid = s_mid[0].split(', position =')[0]
s_mid = s_mid.replace("'", '"').replace('None', '0')
l_mid = json.loads(s_mid)
s_mid = s_mid.replace("'", "")[1:]
l_mid = [(s_key, (float(x))) for s_key, x
in l_mid['midPrice'].iteritems()]
d_rtn['mid'][i_trial][ts_date_all] = dict(l_mid)
# finish the loop as soon as the trial is analyzed
if i_trial > i_desired_trial:
break
return d_rtn, i_trades
def read_logs_to_form_spread(i_desired_trial, s_fname):
'''
Return a dictionary with information for the passed log file and trial and
the number of trades in the main instrument of the strategy (just F21 and
F19)
    :param i_desired_trial: integer. the trial ID to collect data
    :param s_fname: string. the name of the log file analyzed
'''
with open(s_fname) as fr:
        # initiate the returned dictionary and other control variables
d_rtn = {'pnl': defaultdict(dict),
'position': defaultdict(dict),
'mid': defaultdict(dict),
'duration': defaultdict(dict),
'TOB_F21': defaultdict(dict),
'TOB_F19': defaultdict(dict),
'MY_PRICES': defaultdict(dict),
'EXEC': defaultdict(dict),
                 'LAST_SPREAD': defaultdict(dict)}  # where I am most aggressive
f_reward = 0.
f_count_step = 0
last_reward = 0.
i_trial = 0
i_trades = 0
l_trade_actions = ['TAKE', 'crossed_prices', 'correction_by_trade',
'HIT']
for idx, row in enumerate(fr):
if row == '\n':
continue
# extract desired information
# count the number of trials
if ' New Trial will start!' in row:
i_trial += 1
f_count_step = 0
f_reward = 0
elif '.update():' in row and i_trial == i_desired_trial:
s_aux = row.strip().split(';')[1]
s_x = row.split('time = ')[1].split(',')[0]
s_date_all = s_x
s_x = s_date_all[:-7]
s_date = s_x
ts_date_all = pd.to_datetime(s_date_all)
ts_date = | pd.to_datetime(s_date) | pandas.to_datetime |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 1 16:30:36 2017
@author: yhsui
"""
from math import sqrt
from numpy import concatenate
import numpy as np
from pandas import read_csv
from pandas import DataFrame
from pandas import concat
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import mean_squared_error
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
# convert data to multiple time steps
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
n_vars = 1 if type(data) is list else data.shape[1]
df = | DataFrame(data) | pandas.DataFrame |
import pandas as pd
#import numpy as np
StreamsSheet = | pd.read_csv('Streams.csv', header=0, index_col=0) | pandas.read_csv |
from collections import namedtuple
from pathlib import Path
import numpy as np
import pandas as pd
import pytest
import tfs
from pandas.testing import assert_frame_equal
from test_rdt import arrays_are_close_almost_everywhere
from optics_functions.constants import (
ALPHA, BETA, F1001, F1010, GAMMA, IMAG, NAME, PHASE_ADV, REAL, TUNE, S, X, Y
)
from optics_functions.coupling import (
COUPLING_RDTS,
check_resonance_relation,
closest_tune_approach,
coupling_via_cmatrix,
coupling_via_rdts,
rmatrix_from_coupling,
)
from optics_functions.utils import prepare_twiss_dataframe
INPUT = Path(__file__).parent.parent / "inputs"
COUPLING_BUMP_INPUTS = INPUT / "coupling_bump"
COUPLING_BUMP_TWISS_BEAM_1 = COUPLING_BUMP_INPUTS / "twiss.lhc.b1.coupling_bump.tfs"
@pytest.mark.basic
def test_cmatrix():
n = 5
np.random.seed(487423872)
df = generate_fake_data(n)
df_res = coupling_via_cmatrix(df)
assert all(c in df_res.columns for c in (F1001, F1010, "C11", "C12", "C21", "C22", GAMMA))
assert not df_res.isna().any().any()
# Checks based on CalagaBetatronCoupling2005
detC = df_res["C11"] * df_res["C22"] - df_res["C12"] * df_res["C21"]
fsq_diff = df_res[F1001].abs() ** 2 - df_res[F1010].abs() ** 2
f_term = 1 / (1 + 4 * fsq_diff)
g_sq = df_res[GAMMA] ** 2
assert all(np.abs(detC + g_sq - 1) < 1e-15)
assert all(np.abs(detC / (4 * g_sq) - fsq_diff) < 1e-15) # Eq. (13)
assert all(np.abs(detC + f_term - 1) < 1e-15) # Eq. (13)
assert all(np.abs(g_sq - f_term) < 1e-15) # Eq. (14)
@pytest.mark.basic
@pytest.mark.parametrize("source", ["real", "fake"])
def test_rmatrix_to_coupling_to_rmatrix(source):
if source == "fake":
np.random.seed(487423872)
df = generate_fake_data(5)
else:
df = tfs.read(INPUT / "coupling_bump" / f"twiss.lhc.b1.coupling_bump.tfs", index=NAME)
df_coupling = coupling_via_cmatrix(df)
for col in (f"{ALPHA}X", f"{BETA}X", f"{ALPHA}Y", f"{BETA}Y"):
df_coupling[col] = df[col]
df_res = rmatrix_from_coupling(df_coupling)
for col in ("R11", "R12", "R21", "R22"):
# print(col, "\n", max(np.abs(df[col] - df_res[col]))) # for debugging
assert all(np.abs(df[col] - df_res[col]) < 5e-15)
@pytest.mark.basic
def test_real_output():
n = 7
np.random.seed(474987942)
df = generate_fake_data(n)
df = prepare_twiss_dataframe(df_twiss=df)
df.loc[:, "K1L"] = np.random.rand(n)
df.loc[:, "K1SL"] = np.random.rand(n)
df_cmatrix = coupling_via_cmatrix(df, complex_columns=False)
df_rdts = coupling_via_rdts(df, qx=1.31, qy=1.32, complex_columns=False)
assert all(np.real(df_cmatrix) == df_cmatrix)
assert all(np.real(df_rdts) == df_rdts)
columns = [f"{c}{r}" for c in COUPLING_RDTS for r in (REAL, IMAG)]
assert df_rdts[columns].all().all() # check no 0-values
assert df_cmatrix[columns].all().all() # check no 0-values
assert df_cmatrix.columns.str.match(f".+{REAL}$").sum() == 2
assert df_cmatrix.columns.str.match(f".+{IMAG}$").sum() == 2
assert df_rdts.columns.str.match(f".+{REAL}$").sum() == 2
assert df_rdts.columns.str.match(f".+{IMAG}$").sum() == 2
@pytest.mark.basic
@pytest.mark.parametrize(
"cta_method, max_relative_error_to_teapot, result_should_be_real",
[
("calaga", 0, True), # this is the same as teapot, hence 0 relative error
("franchi", 0.001, True),
("teapot_franchi", 0.0005, True),
("persson", 0.25, False), # not sure why it is so high from here
("persson_alt", 0.25, False),
("hoydalsvik", 0.25, False),
("hoydalsvik_alt", 0.25, False),
],
)
def test_closest_tune_approach(
cta_method, max_relative_error_to_teapot, result_should_be_real, _coupling_bump_teapot_cta
):
df_twiss = tfs.read(COUPLING_BUMP_TWISS_BEAM_1, index=NAME)
df = prepare_twiss_dataframe(df_twiss=df_twiss, max_order=7)
df_cmatrix = coupling_via_cmatrix(df)
df_twiss[F1001] = df_cmatrix[F1001] # ignoring F1010 in this test as it is bigger than F1001
cta_df = closest_tune_approach(df_twiss, method=cta_method) # only one column
cminus = cta_df.mean().abs()[0]
relative_error = _relative_error(cminus, _coupling_bump_teapot_cta)
assert relative_error <= max_relative_error_to_teapot
assert not cta_df.isna().any().any() # check no NaNs
assert all(cta_df[df_cmatrix[F1001] != 0] != 0)
if result_should_be_real:
assert all(np.isreal(cta_df))
@pytest.mark.basic
def test_check_resonance_relation_with_nan(caplog):
df = pd.DataFrame([[1, 2, 3, 4], [2, 1, -5, 1]], index=[F1001, F1010]).T
df_nan = check_resonance_relation(df, to_nan=True)
assert all(df_nan.loc[0, :].isna())
assert all(df_nan.loc[2, :].isna())
assert all(df_nan.loc[1, :] == df.loc[1, :])
assert all(df_nan.loc[3, :] == df.loc[3, :])
assert "|F1001| < |F1010|" in caplog.text
@pytest.mark.basic
def test_check_resonance_relation_without_nan(caplog):
df = pd.DataFrame([[1, 2, 3, 4], [2, 1, 5, 1]], index=[F1001, F1010]).T
df_out = check_resonance_relation(df, to_nan=False)
assert_frame_equal(df_out, df)
assert "|F1001| < |F1010|" in caplog.text
@pytest.mark.basic
def test_check_resonance_relation_all_good(caplog):
df = | pd.DataFrame([[2, 3, 4, 5], [1, 3, 3, 4]], index=[F1001, F1010]) | pandas.DataFrame |
import json
import io
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import dash
from dash import html
from dash import dcc
import dash_bootstrap_components as dbc
import pandas as pd
import numpy as np
import plotly.express as px
from dash.dependencies import Output, Input, State
from datetime import datetime, timedelta
from server import app
from sqlalchemy import create_engine
from flask import send_file
import os
from joblib import Parallel, delayed
from dash.exceptions import PreventUpdate
import time
import re
# ----------------------------------------------------------------------------------------------------- Level-1 figure 1 ----------------------------------------------------------------------------------------------------------------------
# Get data for level-1 figure 1: monthly record counts for biochemistry, exam and temperature data
def get_first_lev_first_fig_date(engine):
res = pd.DataFrame(columns=['业务类型', 'num', 'month' ])
    # Monthly record counts per business type
bus_dic = {
'生化': "select '生化' as 业务类型 ,count(1) as num ,substr(REQUESTTIME,1,7) as month from ROUTINE2 where REQUESTTIME is not null group by substr(REQUESTTIME,1,7)",
'检查': " select '检查' as 业务类型 , count(1) as num ,substr(EXAM_DATE,1,7) as month from EXAM where EXAM_DATE is not null group by substr(EXAM_DATE,1,7) ",
'体温': " select '体温' as 业务类型 , count(1) as num ,substr(RECORDDATE,1,7) as month from TEMPERATURE where RECORDDATE is not null group by substr(RECORDDATE,1,7) ",
}
for bus in bus_dic:
res = res.append(pd.read_sql(bus_dic[bus],con=engine))
return res
# Update level-1 figure 1
@app.callback(
Output('rout_exam_temp_first_level_first_fig','figure'),
Output('rout_exam_temp_first_level_first_fig_data','data'),
Input('rout_exam_temp_first_level_first_fig_data','data'),
Input("db_con_url", "data"),
Input("count_time", "data"),
# prevent_initial_call=True
)
def update_first_level_first_fig(rout_exam_temp_first_level_first_fig_data,db_con_url,count_time):
if db_con_url is None :
return dash.no_update
else:
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
engine = create_engine(db_con_url['db'])
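        # Caching pattern: the dcc.Store keeps the last query result as JSON together with
        # the hospital name (and time range); the SQL is re-run only when the store is empty
        # or the hospital changes, otherwise the cached frame is reloaded and dash.no_update
        # leaves the store unchanged.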
if rout_exam_temp_first_level_first_fig_data is None:
rout_exam_temp_first_level_first_fig_data = {}
rout_exam_temp_first_level_first_fig = get_first_lev_first_fig_date(engine)
rout_exam_temp_first_level_first_fig_data['rout_exam_temp_first_level_first_fig'] = rout_exam_temp_first_level_first_fig.to_json(orient='split', date_format='iso')
rout_exam_temp_first_level_first_fig_data['hosname'] = db_con_url['hosname']
rout_exam_temp_first_level_first_fig_data['btime'] = btime
rout_exam_temp_first_level_first_fig_data['etime'] = etime
rout_exam_temp_first_level_first_fig_data = json.dumps(rout_exam_temp_first_level_first_fig_data)
else:
rout_exam_temp_first_level_first_fig_data = json.loads(rout_exam_temp_first_level_first_fig_data)
if db_con_url['hosname'] != rout_exam_temp_first_level_first_fig_data['hosname']:
rout_exam_temp_first_level_first_fig = get_first_lev_first_fig_date(engine)
rout_exam_temp_first_level_first_fig_data['rout_exam_temp_first_level_first_fig'] = rout_exam_temp_first_level_first_fig.to_json(orient='split',date_format='iso')
rout_exam_temp_first_level_first_fig_data['hosname'] = db_con_url['hosname']
rout_exam_temp_first_level_first_fig_data = json.dumps(rout_exam_temp_first_level_first_fig_data)
else:
rout_exam_temp_first_level_first_fig = pd.read_json(rout_exam_temp_first_level_first_fig_data['rout_exam_temp_first_level_first_fig'], orient='split')
rout_exam_temp_first_level_first_fig_data = dash.no_update
#
rout_exam_temp_first_level_first_fig = rout_exam_temp_first_level_first_fig[(rout_exam_temp_first_level_first_fig['month']>=btime) & (rout_exam_temp_first_level_first_fig['month']<=etime)]
rout_exam_temp_first_level_first_fig = rout_exam_temp_first_level_first_fig.sort_values(['month','业务类型'])
fig1 = px.line(rout_exam_temp_first_level_first_fig, x='month', y='num', color='业务类型',
color_discrete_sequence=px.colors.qualitative.Dark24)
        # Use a horizontal legend and set its position
fig1.update_layout(
margin=dict(l=20, r=20, t=20, b=20),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
))
fig1.update_yaxes(title_text="业务数据量")
fig1.update_xaxes(title_text="时间")
return fig1,rout_exam_temp_first_level_first_fig_data
# ----------------------------------------------------------------------------------------------------- Level-1 figure 2 ----------------------------------------------------------------------------------------------------------------------
# Get data for level-1 figure 2
def get_first_lev_second_fig_date(engine):
res = pd.DataFrame(columns=['问题类型', 'num' ])
    # Problem categories, problem record counts and overall counts
bus_dic = {
'体温测量时间缺失': f"select '体温测量时间缺失' as 问题类型 ,count(1) as num from TEMPERATURE where RECORDDATE is null ",
'生化检验申请时间缺失': f"select '生化检验申请时间缺失' as 问题类型 ,count(1) as num from ROUTINE2 where REQUESTTIME is null ",
'生化检验报告时间缺失': f"select '生化检验报告时间缺失' as 问题类型 ,count(1) as num from ROUTINE2 where REPORTTIME is null",
'检查时间为空': f"select '检查时间为空' as 问题类型 ,count(1) as num from exam where EXAM_DATE is null ",
}
for bus in bus_dic:
res = res.append(pd.read_sql(bus_dic[bus],con=engine))
return res
# Update level-1 figure 2
@app.callback(
Output('rout_exam_temp_first_level_second_fig','figure'),
Output('rout_exam_temp_first_level_second_fig_data','data'),
Input('rout_exam_temp_first_level_second_fig_data','data'),
Input("db_con_url", "data"),
Input("count_time", "data"),
# prevent_initial_call=True
)
def update_first_level_first_fig(rout_exam_temp_first_level_second_fig_data,db_con_url,count_time):
if db_con_url is None:
return dash.no_update
else:
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
if rout_exam_temp_first_level_second_fig_data is None:
rout_exam_temp_first_level_second_fig = get_first_lev_second_fig_date(engine)
rout_exam_temp_first_level_second_fig_data = {}
rout_exam_temp_first_level_second_fig_data['rout_exam_temp_first_level_second_fig'] = rout_exam_temp_first_level_second_fig.to_json( orient='split', date_format='iso')
rout_exam_temp_first_level_second_fig_data['hosname'] = db_con_url['hosname']
rout_exam_temp_first_level_second_fig_data = json.dumps(rout_exam_temp_first_level_second_fig_data)
else:
rout_exam_temp_first_level_second_fig_data = json.loads(rout_exam_temp_first_level_second_fig_data)
if db_con_url['hosname'] != rout_exam_temp_first_level_second_fig_data['hosname']:
rout_exam_temp_first_level_second_fig = get_first_lev_second_fig_date(engine)
rout_exam_temp_first_level_second_fig_data = {}
rout_exam_temp_first_level_second_fig_data[ 'rout_exam_temp_first_level_second_fig'] = rout_exam_temp_first_level_second_fig.to_json( orient='split', date_format='iso')
rout_exam_temp_first_level_second_fig_data['hosname'] = db_con_url['hosname']
rout_exam_temp_first_level_second_fig_data = json.dumps(rout_exam_temp_first_level_second_fig_data)
else:
rout_exam_temp_first_level_second_fig = pd.read_json( rout_exam_temp_first_level_second_fig_data['rout_exam_temp_first_level_second_fig'], orient='split')
rout_exam_temp_first_level_second_fig_data = dash.no_update
fig = go.Figure()
# fig = px.bar(rout_exam_temp_first_level_second_fig,x='问题类型',y='num',color_discrete_sequence=px.colors.qualitative.Dark24 )
fig.add_trace(
go.Bar(x=rout_exam_temp_first_level_second_fig['问题类型'], y=rout_exam_temp_first_level_second_fig['num'], name="问题类型",
marker_color=px.colors.qualitative.Dark24, )
)
fig.update_layout(
margin=dict(l=20, r=20, t=20, b=20),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
)
)
fig.update_yaxes(title_text="问题数量")
fig.update_xaxes(title_text="月份")
return fig, rout_exam_temp_first_level_second_fig_data
# Download the detail data behind level-1 figure 2
@app.callback(
Output('rout_exam_temp_first_level_second_fig_detail', 'data'),
Input('rout_exam_temp_first_level_second_fig_data_detail_down','n_clicks'),
Input("db_con_url", "data"),
Input("count_time", "data"),
prevent_initial_call=True,
)
def download_first_level_third_fig_data_detail(n_clicks,db_con_url,count_time):
if db_con_url is None :
return dash.no_update
else:
if n_clicks is not None and n_clicks>0:
n_clicks = 0
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
bus_dic = {
'体温测量时间缺失': f"select * from TEMPERATURE where RECORDDATE is null ",
'生化检验申请时间缺失': f"select * from ROUTINE2 where REQUESTTIME is null ",
'生化检验报告时间缺失': f"select * from ROUTINE2 where REPORTTIME is null",
'检查时间为空': f"select * from exam where EXAM_DATE is null ",
}
output = io.BytesIO()
writer = pd.ExcelWriter(output, engine='xlsxwriter')
for key in bus_dic.keys():
try:
temp = pd.read_sql(bus_dic[key], con=engine)
if temp.shape[0] > 0:
temp.to_excel(writer, sheet_name=key)
except:
error_df = pd.DataFrame(['明细数据获取出错'], columns=[key])
error_df.to_excel(writer, sheet_name=key)
writer.save()
data = output.getvalue()
hosName = db_con_url['hosname']
return dcc.send_bytes(data, f'{hosName}时间缺失数据明细.xlsx')
else:
return dash.no_update
# # ----------------------------------------------------------------------------------------------------- Level-2 figure 1 ----------------------------------------------------------------------------------------------------------------------
# # Get data for temperature level-2 figure 1
def get_second_lev_first_fig_date(engine,btime,etime):
    res = pd.DataFrame(columns=['问题类型','num','month'])
bus_dic = {
'体温测量值异常': f"select '体温测量值异常' as 问题类型 ,count(1) as num ,substr(RECORDDATE,1,7) as month from TEMPERATURE where (VALUE >46 or VALUE<34) and substr(RECORDDATE,1,7)>='{btime}' and substr(RECORDDATE,1,7)<='{etime}' group by substr(RECORDDATE,1,7)",
'体温测量值缺失': f"select '体温测量值缺失' as 问题类型 ,count(1) as num ,substr(RECORDDATE,1,7) as month from TEMPERATURE where VALUE is null and substr(RECORDDATE,1,7)>='{btime}' and substr(RECORDDATE,1,7)<='{etime}' group by substr(RECORDDATE,1,7)",
'科室缺失': f"select '科室缺失' as 问题类型 ,count(1) as num ,substr(RECORDDATE,1,7) as month from TEMPERATURE where DEPT is null and substr(RECORDDATE,1,7)>='{btime}' and substr(RECORDDATE,1,7)<='{etime}' group by substr(RECORDDATE,1,7)",
'体温测量时机缺失': f"select '体温测量时机缺失' as 问题类型 ,count(1) as num ,substr(RECORDDATE,1,7) as month from TEMPERATURE where OUTSIDE is null and substr(RECORDDATE,1,7)>='{btime}' and substr(RECORDDATE,1,7)<='{etime}' group by substr(RECORDDATE,1,7)",
'体温测量时间无时间点': f"select '检验测量时间无时间点' as 问题类型 ,count(1) as num ,substr(RECORDDATE,1,7) as month from TEMPERATURE where length(RECORDDATE)<19 and substr(RECORDDATE,1,7)>='{btime}' and substr(RECORDDATE,1,7)<='{etime}' group by substr(RECORDDATE,1,7)",
'体温测量时间在出入院时间之外': f""" select '体温测量时间在出入院时间之外' as 问题类型,count(1) as num ,substr(RECORDDATE,1,7) as month from TEMPERATURE t1,overall t2 where
( t1.RECORDDATE is not null and t2.in_time is not null and t2.out_time is not null)
and t1.caseid = t2.caseid
and (t1.RECORDDATE<t2.IN_TIME or t1.RECORDDATE > t2.OUT_TIME )
and (substr(t1.RECORDDATE,1,7)>='{btime}' and substr(t1.RECORDDATE,1,7)<='{etime}')
group by substr(RECORDDATE,1,7)
""",
}
for bus in bus_dic:
res = res.append(pd.read_sql(bus_dic[bus],con=engine))
return res
# Update level-2 figure 1
@app.callback(
Output('temp_second_level_first_fig','figure'),
Output('temp_second_level_first_fig_data','data'),
Input('temp_second_level_first_fig_data','data'),
Input("db_con_url", "data"),
Input("count_time", "data"),
)
def update_first_level_second_fig(temp_second_level_first_fig_data,db_con_url,count_time):
if db_con_url is None:
return dash.no_update
else:
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
if temp_second_level_first_fig_data is None:
temp_second_level_first_fig_data = {}
temp_second_level_first_fig = get_second_lev_first_fig_date(engine, btime, etime)
temp_second_level_first_fig_data['temp_second_level_first_fig'] = temp_second_level_first_fig.to_json( orient='split', date_format='iso')
temp_second_level_first_fig_data['hosname'] = db_con_url['hosname']
temp_second_level_first_fig_data['btime'] = btime
temp_second_level_first_fig_data['etime'] = etime
temp_second_level_first_fig_data = json.dumps(temp_second_level_first_fig_data)
else:
temp_second_level_first_fig_data = json.loads(temp_second_level_first_fig_data)
if db_con_url['hosname'] != temp_second_level_first_fig_data['hosname']:
temp_second_level_first_fig = get_second_lev_first_fig_date(engine, btime, etime)
temp_second_level_first_fig_data['temp_second_level_first_fig'] = temp_second_level_first_fig.to_json( orient='split', date_format='iso')
temp_second_level_first_fig_data['hosname'] = db_con_url['hosname']
temp_second_level_first_fig_data['btime'] = btime
temp_second_level_first_fig_data['etime'] = etime
temp_second_level_first_fig_data = json.dumps(temp_second_level_first_fig_data)
else:
if temp_second_level_first_fig_data['btime'] != btime or temp_second_level_first_fig_data[ 'etime'] != etime:
temp_second_level_first_fig = get_second_lev_first_fig_date(engine, btime, etime)
temp_second_level_first_fig_data[ 'temp_second_level_first_fig'] = temp_second_level_first_fig.to_json( orient='split', date_format='iso')
temp_second_level_first_fig_data['btime'] = btime
temp_second_level_first_fig_data['etime'] = etime
temp_second_level_first_fig_data = json.dumps(temp_second_level_first_fig_data)
else:
temp_second_level_first_fig = pd.read_json( temp_second_level_first_fig_data['temp_second_level_first_fig'], orient='split')
temp_second_level_first_fig_data = dash.no_update
temp_second_level_first_fig = temp_second_level_first_fig.sort_values(['month'])
fig = px.line(temp_second_level_first_fig, x="month", y="num", color='问题类型',
color_discrete_sequence=px.colors.qualitative.Dark24)
fig.update_layout(
margin=dict(l=30, r=30, t=30, b=30),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
),
)
fig.update_yaxes(title_text="体温测量数量", )
fig.update_xaxes(title_text="月份", )
return fig, temp_second_level_first_fig_data
# Download the detail data behind level-2 figure 1
@app.callback(
Output('temp_second_level_first_fig_detail', 'data'),
Input('temp_second_level_first_fig_data_detail_down','n_clicks'),
Input("db_con_url", "data"),
Input("count_time", "data"),
prevent_initial_call=True,
)
def download_first_level_third_fig_data_detail(n_clicks,db_con_url,count_time):
if db_con_url is None :
return dash.no_update
else:
if n_clicks is not None and n_clicks>0:
n_clicks = 0
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
engine = create_engine(db_con_url['db'])
bus_dic = {
'体温测量值异常': f"select * from TEMPERATURE where (VALUE >46 or VALUE<34) and substr(RECORDDATE,1,7)>='{btime}' and substr(RECORDDATE,1,7)<='{etime}' ",
'体温测量值缺失': f"select * from TEMPERATURE where VALUE is null and substr(RECORDDATE,1,7)>='{btime}' and substr(RECORDDATE,1,7)<='{etime}' ",
'科室缺失': f"select * from TEMPERATURE where DEPT is null and substr(RECORDDATE,1,7)>='{btime}' and substr(RECORDDATE,1,7)<='{etime}' ",
'体温测量时机缺失': f"select * from TEMPERATURE where OUTSIDE is null and substr(RECORDDATE,1,7)>='{btime}' and substr(RECORDDATE,1,7)<='{etime}' ",
'体温测量时间无时间点': f"select * from TEMPERATURE where length(RECORDDATE)<19 and substr(RECORDDATE,1,7)>='{btime}' and substr(RECORDDATE,1,7)<='{etime}' ",
'体温测量时间在出入院时间之外': f""" select t1.*,t2.in_time as 入院时间,t2.out_time as 出院时间 from TEMPERATURE t1,overall t2 where
( t1.RECORDDATE is not null and t2.in_time is not null and t2.out_time is not null)
and t1.caseid = t2.caseid
and (t1.RECORDDATE<t2.IN_TIME or t1.RECORDDATE > t2.OUT_TIME )
and (substr(t1.RECORDDATE,1,7)>='{btime}' and substr(t1.RECORDDATE,1,7)<='{etime}')
""",
}
output = io.BytesIO()
writer = pd.ExcelWriter(output, engine='xlsxwriter')
for key in bus_dic.keys():
try:
temp = pd.read_sql(bus_dic[key], con=engine)
if temp.shape[0] > 0:
temp.to_excel(writer, sheet_name=key)
except:
error_df = pd.DataFrame(['明细数据获取出错'], columns=[key])
error_df.to_excel(writer, sheet_name=key)
writer.save()
data = output.getvalue()
hosName = db_con_url['hosname']
return dcc.send_bytes(data, f'{hosName}体温问题数据明细.xlsx')
else:
return dash.no_update
#
# # # ----------------------------------------------------------------------------------------------------- Level-3 figure 1 ----------------------------------------------------------------------------------------------------------------------
# Get data for biochemistry-lab level-3 figure 1
def get_third_lev_first_fig_date(engine,btime,etime):
res_数据时间缺失及汇总 = pd.DataFrame(columns=['问题类型', 'num', 'month' ])
    # Problem categories, problem record counts and overall counts
bus_dic = {
'标本缺失': f"select '标本缺失' as 问题类型 ,count(1) as num ,substr(REQUESTTIME,1,7) as month from ROUTINE2 where REQUESTTIME is not null and substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' and SPECIMEN is null group by substr(REQUESTTIME,1,7)",
'检验项目缺失': f"select '检验项目缺失' as 问题类型 ,count(1) as num ,substr(REQUESTTIME,1,7) as month from ROUTINE2 where REQUESTTIME is not null and substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' and RTYPE is null group by substr(REQUESTTIME,1,7)",
'检验结果缺失': f"select '检验结果缺失' as 问题类型 ,count(1) as num ,substr(REQUESTTIME,1,7) as month from ROUTINE2 where REQUESTTIME is not null and substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' and RVALUE is null group by substr(REQUESTTIME,1,7)",
'院内外标识缺失': f"select '院内外标识缺失' as 问题类型 ,count(1) as num ,substr(REQUESTTIME,1,7) as month from ROUTINE2 where REQUESTTIME is not null and substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' and OUTSIDE is null group by substr(REQUESTTIME,1,7)",
'检验子项缺失': f"select '检验子项缺失' as 问题类型 ,count(1) as num ,substr(REQUESTTIME,1,7) as month from ROUTINE2 where REQUESTTIME is not null and substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' and RITEM is null group by substr(REQUESTTIME,1,7)",
'定性结果缺失': f"select '定性结果缺失' as 问题类型 ,count(1) as num ,substr(REQUESTTIME,1,7) as month from ROUTINE2 where REQUESTTIME is not null and substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' and ABNORMAL is null group by substr(REQUESTTIME,1,7)",
'申请时间大于等于报告时间': f"select '申请时间大于等于报告时间' as 问题类型 ,count(1) as num ,substr(REQUESTTIME,1,7) as month from ROUTINE2 where REQUESTTIME >= REPORTTIME and substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' group by substr(REQUESTTIME,1,7)",
'申请时间在出入院时间之外': f""" select '申请时间在出入院时间之外' as 问题类型,count(1) as num ,substr(REQUESTTIME,1,7) as month from ROUTINE2 t1,overall t2 where
( t1.REQUESTTIME is not null and t1.REPORTTIME is not null and t2.in_time is not null and t2.out_time is not null)
and t1.caseid = t2.caseid
and (t1.REQUESTTIME<t2.IN_TIME or t1.REQUESTTIME > t2.OUT_TIME )
and (substr(t1.REQUESTTIME,1,7)>='{btime}' and substr(t1.REQUESTTIME,1,7)<='{etime}')
group by substr(REQUESTTIME,1,7)
""",
}
for bus in bus_dic:
res_数据时间缺失及汇总 = res_数据时间缺失及汇总.append(pd.read_sql(bus_dic[bus],con=engine))
return res_数据时间缺失及汇总
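# (Added note) DataFrame.append, used in the loop above, was deprecated in pandas 1.4
# and removed in pandas 2.0. On newer pandas the same loop can collect the per-problem
# frames and concatenate them once, e.g.:
#
#     frames = [pd.read_sql(bus_dic[bus], con=engine) for bus in bus_dic]
#     res_数据时间缺失及汇总 = pd.concat(frames, ignore_index=True)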
# Update biochemistry-test (ROUTINE2) level-3 figure 1
@app.callback(
Output('rout_third_level_first_fig','figure'),
Output('rout_third_level_first_fig_data','data'),
Input('rout_third_level_first_fig_data','data'),
Input("db_con_url", "data"),
Input("count_time", "data"),
# prevent_initial_call=True
)
def update_rout_third_level_first_fig(rout_third_level_first_fig_data, db_con_url, count_time):
if db_con_url is None:
return dash.no_update
else:
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
if rout_third_level_first_fig_data is None:
rout_third_level_first_fig_data = {}
rout_third_level_first_fig = get_third_lev_first_fig_date(engine, btime, etime)
rout_third_level_first_fig_data['rout_third_level_first_fig'] = rout_third_level_first_fig.to_json( orient='split', date_format='iso')
rout_third_level_first_fig_data['hosname'] = db_con_url['hosname']
rout_third_level_first_fig_data['btime'] = btime
rout_third_level_first_fig_data['etime'] = etime
rout_third_level_first_fig_data = json.dumps(rout_third_level_first_fig_data)
else:
rout_third_level_first_fig_data = json.loads(rout_third_level_first_fig_data)
if db_con_url['hosname'] != rout_third_level_first_fig_data['hosname']:
rout_third_level_first_fig = get_third_lev_first_fig_date(engine, btime, etime)
rout_third_level_first_fig_data['rout_third_level_first_fig'] = rout_third_level_first_fig.to_json( orient='split', date_format='iso')
rout_third_level_first_fig_data['hosname'] = db_con_url['hosname']
rout_third_level_first_fig_data['btime'] = btime
rout_third_level_first_fig_data['etime'] = etime
rout_third_level_first_fig_data = json.dumps(rout_third_level_first_fig_data)
else:
if rout_third_level_first_fig_data['btime'] != btime or rout_third_level_first_fig_data[ 'etime'] != etime:
rout_third_level_first_fig = get_third_lev_first_fig_date(engine, btime, etime)
rout_third_level_first_fig_data[ 'rout_third_level_first_fig'] = rout_third_level_first_fig.to_json(orient='split', date_format='iso')
rout_third_level_first_fig_data['btime'] = btime
rout_third_level_first_fig_data['etime'] = etime
rout_third_level_first_fig_data = json.dumps(rout_third_level_first_fig_data)
else:
rout_third_level_first_fig = pd.read_json( rout_third_level_first_fig_data['rout_third_level_first_fig'], orient='split')
rout_third_level_first_fig_data = dash.no_update
rout_third_level_first_fig = rout_third_level_first_fig.sort_values(['month'])
fig = px.line(rout_third_level_first_fig,x='month',y='num',color='问题类型',color_discrete_sequence=px.colors.qualitative.Dark24 )
fig.update_layout(
margin=dict(l=20, r=20, t=20, b=20),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
)
)
fig.update_yaxes(title_text="问题数量")
fig.update_xaxes(title_text="月份")
return fig, rout_third_level_first_fig_data
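# ---------------------------------------------------------------------------------
# (Added sketch, not part of the original code.) The level-3 and level-4 figure
# callbacks in this section repeat the same dcc.Store bookkeeping: rebuild the
# DataFrame when the store is empty, the hospital changed, or the time window
# changed; otherwise reuse the cached JSON. A hypothetical helper capturing that
# pattern (the name `load_or_refresh` is assumed, not from the original app):
def load_or_refresh(store_json, key, hosname, btime, etime, fetch):
    """Return (DataFrame, new_store_json or dash.no_update), reusing the cache while it is valid."""
    if store_json:
        cached = json.loads(store_json)
        if cached.get('hosname') == hosname and cached.get('btime') == btime and cached.get('etime') == etime:
            return pd.read_json(cached[key], orient='split'), dash.no_update
    df = fetch()
    new_store = json.dumps({key: df.to_json(orient='split', date_format='iso'),
                            'hosname': hosname, 'btime': btime, 'etime': etime})
    return df, new_store
# A callback could then reduce to:
#     fig_df, store = load_or_refresh(rout_third_level_first_fig_data, 'rout_third_level_first_fig',
#                                     db_con_url['hosname'], btime, etime,
#                                     lambda: get_third_lev_first_fig_date(engine, btime, etime))
# ---------------------------------------------------------------------------------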
# Download the level-3 figure 1 detail data
@app.callback(
Output('rout_third_level_first_fig_detail', 'data'),
Input('rout_third_level_first_fig_data_detail_down','n_clicks'),
Input("db_con_url", "data"),
Input("count_time", "data"),
prevent_initial_call=True,
)
def download_rout_third_level_first_fig_detail(n_clicks, db_con_url, count_time):
if db_con_url is None :
return dash.no_update
else:
if n_clicks is not None and n_clicks>0:
n_clicks = 0
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
bus_dic = {
'标本缺失': f"select * from ROUTINE2 where REQUESTTIME is not null and substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' and SPECIMEN is null ",
'检验项目缺失': f"select * from ROUTINE2 where REQUESTTIME is not null and substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' and RTYPE is null ",
'检验结果缺失': f"select * from ROUTINE2 where REQUESTTIME is not null and substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' and RVALUE is null ",
'院内外标识缺失': f"select * from ROUTINE2 where REQUESTTIME is not null and substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' and OUTSIDE is null ",
'检验子项缺失': f"select * from ROUTINE2 where REQUESTTIME is not null and substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' and RITEM is null ",
'定性结果缺失': f"select * from ROUTINE2 where REQUESTTIME is not null and substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' and ABNORMAL is null ",
'申请时间大于等于报告时间': f"select * from ROUTINE2 where REQUESTTIME >= REPORTTIME and substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' ",
'申请时间在出入院时间之外': f""" select t1.* ,t2.in_time as 入院时间,t2.out_time as 出院时间 from ROUTINE2 t1,overall t2 where
( t1.REQUESTTIME is not null and t1.REPORTTIME is not null and t2.in_time is not null and t2.out_time is not null)
and t1.caseid = t2.caseid
and (t1.REQUESTTIME<t2.IN_TIME or t1.REQUESTTIME > t2.OUT_TIME )
and (substr(t1.REQUESTTIME,1,7)>='{btime}' and substr(t1.REQUESTTIME,1,7)<='{etime}')
""",
}
output = io.BytesIO()
writer = pd.ExcelWriter(output, engine='xlsxwriter')
for key in bus_dic.keys():
try:
temp = pd.read_sql(bus_dic[key], con=engine)
if temp.shape[0] > 0:
temp.to_excel(writer, sheet_name=key)
except:
error_df = pd.DataFrame(['明细数据获取出错'], columns=[key])
error_df.to_excel(writer, sheet_name=key)
writer.save()
data = output.getvalue()
hosName = db_con_url['hosname']
return dcc.send_bytes(data, f'{hosName}生化检验问题数据明细.xlsx')
else:
return dash.no_update
# # # ----------------------------------------------------------------------------------------------------- Level-3 Figure 2 ----------------------------------------------------------------------------------------------------------------------
# Fetch the data for the biochemistry-test level-3 figure 2
def get_third_level_second_fig_date(engine,btime,etime):
res = pd.read_sql(f"select RTYPE as 生化检验类型,count(distinct CASEID||TESTNO||RTYPE) as num ,substr(REQUESTTIME,1,7) as month from ROUTINE2 where RTYPE is not null and substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' group by RTYPE,substr(REQUESTTIME,1,7)",con=engine)
return res
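# (Added sketch, not part of the original code.) The queries in this module splice
# btime/etime into the SQL with f-strings. Both values come from the app's own
# dcc.Store, but a bound-parameter variant avoids any quoting concerns. The helper
# name below is hypothetical and assumes SQLAlchemy, which is already in use via
# create_engine.
from sqlalchemy import text

def get_third_level_second_fig_date_bound(engine, btime, etime):
    """Same query as above, with btime/etime passed as bound parameters."""
    sql = text(
        "select RTYPE as 生化检验类型, count(distinct CASEID||TESTNO||RTYPE) as num, "
        "substr(REQUESTTIME,1,7) as month from ROUTINE2 "
        "where RTYPE is not null and substr(REQUESTTIME,1,7) >= :btime "
        "and substr(REQUESTTIME,1,7) <= :etime "
        "group by RTYPE, substr(REQUESTTIME,1,7)"
    )
    return pd.read_sql(sql, con=engine, params={'btime': btime, 'etime': etime})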
# Update biochemistry-test level-3 figure 2
@app.callback(
Output('rout_third_level_second_fig','figure'),
Output('rout_third_level_second_fig_data','data'),
Input('rout_third_level_second_fig_data','data'),
Input("db_con_url", "data"),
Input("count_time", "data"),
# prevent_initial_call=True
)
def update_rout_third_level_second_fig(rout_third_level_second_fig_data, db_con_url, count_time):
if db_con_url is None:
return dash.no_update
else:
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
if rout_third_level_second_fig_data is None:
rout_third_level_second_fig_data = {}
rout_third_level_second_fig = get_third_level_second_fig_date(engine, btime, etime)
rout_third_level_second_fig_data['rout_third_level_second_fig'] = rout_third_level_second_fig.to_json(orient='split', date_format='iso')
rout_third_level_second_fig_data['hosname'] = db_con_url['hosname']
rout_third_level_second_fig_data['btime'] = btime
rout_third_level_second_fig_data['etime'] = etime
rout_third_level_second_fig_data = json.dumps(rout_third_level_second_fig_data)
else:
rout_third_level_second_fig_data = json.loads(rout_third_level_second_fig_data)
if db_con_url['hosname'] != rout_third_level_second_fig_data['hosname']:
rout_third_level_second_fig = get_third_level_second_fig_date(engine, btime, etime)
rout_third_level_second_fig_data['rout_third_level_second_fig'] = rout_third_level_second_fig.to_json(orient='split',date_format='iso')
rout_third_level_second_fig_data['hosname'] = db_con_url['hosname']
rout_third_level_second_fig_data['btime'] = btime
rout_third_level_second_fig_data['etime'] = etime
rout_third_level_second_fig_data = json.dumps(rout_third_level_second_fig_data)
else:
if rout_third_level_second_fig_data['btime'] != btime or rout_third_level_second_fig_data['etime'] != etime:
rout_third_level_second_fig = get_third_level_second_fig_date(engine, btime, etime)
rout_third_level_second_fig_data['rout_third_level_second_fig'] = rout_third_level_second_fig.to_json(orient='split',date_format='iso')
rout_third_level_second_fig_data['btime'] = btime
rout_third_level_second_fig_data['etime'] = etime
rout_third_level_second_fig_data = json.dumps(rout_third_level_second_fig_data)
else:
rout_third_level_second_fig = pd.read_json(rout_third_level_second_fig_data['rout_third_level_second_fig'], orient='split')
rout_third_level_second_fig_data = dash.no_update
rout_third_level_second_fig = rout_third_level_second_fig.sort_values(['month'])
# fig = px.line(rout_third_level_second_fig,x='month',y='num',color='生化检验类型',color_discrete_sequence=px.colors.qualitative.Dark24)
fig = px.bar(rout_third_level_second_fig,x='month',y='num',color='生化检验类型',color_discrete_sequence=px.colors.qualitative.Dark24)
fig.update_layout(
margin=dict(l=20, r=20, t=20, b=20),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
)
)
fig.update_yaxes(title_text="生化检验数量", )
fig.update_xaxes(title_text="月份", )
return fig,rout_third_level_second_fig_data
#
# # ----------------------------------------------------------------------------------------------------- Level-4 Figure 1 ----------------------------------------------------------------------------------------------------------------------
# Fetch the data for the exam (EXAM) level-4 figure 1
def get_fourth_level_first_fig_date(engine,btime,etime):
res = pd.DataFrame(columns=['问题类型', 'num', 'month'])
# Problem categories, per-month problem record counts, and overall counts
bus_dic = {
'检查类别缺失': f"select '检查类别缺失' as 问题类型 ,count(1) as num ,substr(EXAM_DATE,1,7) as month from EXAM where EXAM_DATE is not null and substr(EXAM_DATE,1,7)>='{btime}' and substr(EXAM_DATE,1,7)<='{etime}' and EXAM_CLASS is null group by substr(EXAM_DATE,1,7)",
'检查部位缺失': f"select '检验部位缺失' as 问题类型 ,count(1) as num ,substr(EXAM_DATE,1,7) as month from EXAM where EXAM_DATE is not null and substr(EXAM_DATE,1,7)>='{btime}' and substr(EXAM_DATE,1,7)<='{etime}' and EXAM_PARA is null group by substr(EXAM_DATE,1,7)",
'检查所见缺失': f"select '检查所见缺失' as 问题类型 ,count(1) as num ,substr(EXAM_DATE,1,7) as month from EXAM where EXAM_DATE is not null and substr(EXAM_DATE,1,7)>='{btime}' and substr(EXAM_DATE,1,7)<='{etime}' and DESCRIPTION is null group by substr(EXAM_DATE,1,7)",
'检查印象缺失': f"select '检查印象缺失' as 问题类型 ,count(1) as num ,substr(EXAM_DATE,1,7) as month from EXAM where EXAM_DATE is not null and substr(EXAM_DATE,1,7)>='{btime}' and substr(EXAM_DATE,1,7)<='{etime}' and IMPRESSION is null group by substr(EXAM_DATE,1,7)",
'检查时间在出入院时间之外': f""" select '检查时间在出入院时间之外' as 问题类型,count(1) as num ,substr(EXAM_DATE,1,7) as month from EXAM t1,overall t2 where
( t1.EXAM_DATE is not null and t2.in_time is not null and t2.out_time is not null)
and t1.caseid = t2.caseid
and (t1.EXAM_DATE<t2.IN_TIME or t1.EXAM_DATE > t2.OUT_TIME )
and (substr(t1.EXAM_DATE,1,7)>='{btime}' and substr(t1.EXAM_DATE,1,7)<='{etime}')
group by substr(EXAM_DATE,1,7)
""",
}
for bus in bus_dic:
res = res.append(pd.read_sql(bus_dic[bus], con=engine))
return res
# Update level-4 figure 1
@app.callback(
Output('exam_fourth_level_first_fig','figure'),
Output('exam_fourth_level_first_fig_data', 'data'),
Input('exam_fourth_level_first_fig_data', 'data'),
Input("db_con_url", "data"),
Input("count_time", "data"),
)
def update_exam_fourth_level_first_fig(exam_fourth_level_first_fig_data, db_con_url, count_time):
if db_con_url is None:
return dash.no_update
else:
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
if exam_fourth_level_first_fig_data is None:
exam_fourth_level_first_fig_data = {}
exam_fourth_level_first_fig = get_fourth_level_first_fig_date(engine, btime, etime)
exam_fourth_level_first_fig_data['exam_fourth_level_first_fig'] = exam_fourth_level_first_fig.to_json( orient='split', date_format='iso')
exam_fourth_level_first_fig_data['hosname'] = db_con_url['hosname']
exam_fourth_level_first_fig_data['btime'] = btime
exam_fourth_level_first_fig_data['etime'] = etime
exam_fourth_level_first_fig_data = json.dumps(exam_fourth_level_first_fig_data)
else:
exam_fourth_level_first_fig_data = json.loads(exam_fourth_level_first_fig_data)
if db_con_url['hosname'] != exam_fourth_level_first_fig_data['hosname']:
exam_fourth_level_first_fig = get_fourth_level_first_fig_date(engine, btime, etime)
exam_fourth_level_first_fig_data['exam_fourth_level_first_fig'] = exam_fourth_level_first_fig.to_json(orient='split', date_format='iso')
exam_fourth_level_first_fig_data['hosname'] = db_con_url['hosname']
exam_fourth_level_first_fig_data['btime'] = btime
exam_fourth_level_first_fig_data['etime'] = etime
exam_fourth_level_first_fig_data = json.dumps(exam_fourth_level_first_fig_data)
else:
if exam_fourth_level_first_fig_data['btime'] != btime or exam_fourth_level_first_fig_data['etime'] != etime:
exam_fourth_level_first_fig = get_fourth_level_first_fig_date(engine, btime, etime)
exam_fourth_level_first_fig_data['exam_fourth_level_first_fig'] = exam_fourth_level_first_fig.to_json(orient='split', date_format='iso')
exam_fourth_level_first_fig_data['btime'] = btime
exam_fourth_level_first_fig_data['etime'] = etime
exam_fourth_level_first_fig_data = json.dumps(exam_fourth_level_first_fig_data)
else:
exam_fourth_level_first_fig = pd.read_json( exam_fourth_level_first_fig_data['exam_fourth_level_first_fig'], orient='split')
exam_fourth_level_first_fig_data = dash.no_update
exam_fourth_level_first_fig = exam_fourth_level_first_fig.sort_values(['month'])
fig = px.line(exam_fourth_level_first_fig, x="month", y="num", color='问题类型', color_discrete_sequence=px.colors.qualitative.Dark24)
fig.update_layout(
margin=dict(l=30, r=30, t=30, b=30),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
),
)
fig.update_yaxes(title_text="问题数量", )
fig.update_xaxes(title_text="月份", )
return fig,exam_fourth_level_first_fig_data
# Download the level-4 figure 1 detail data
@app.callback(
Output('exam_fourth_level_first_fig_detail', 'data'),
Input('exam_fourth_level_first_fig_data_detail_down','n_clicks'),
Input("db_con_url", "data"),
Input("count_time", "data"),
prevent_initial_call=True,
)
def download_exam_fourth_level_first_fig_detail(n_clicks, db_con_url, count_time):
if db_con_url is None :
return dash.no_update
else:
if n_clicks is not None and n_clicks>0:
n_clicks = 0
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
bus_dic = {
'检查类别缺失': f"select * from EXAM where EXAM_DATE is not null and substr(EXAM_DATE,1,7)>='{btime}' and substr(EXAM_DATE,1,7)<='{etime}' and EXAM_CLASS is null ",
'检查部位缺失': f"select * from EXAM where EXAM_DATE is not null and substr(EXAM_DATE,1,7)>='{btime}' and substr(EXAM_DATE,1,7)<='{etime}' and EXAM_PARA is null ",
'检查所见缺失': f"select * from EXAM where EXAM_DATE is not null and substr(EXAM_DATE,1,7)>='{btime}' and substr(EXAM_DATE,1,7)<='{etime}' and DESCRIPTION is null ",
'检查印象缺失': f"select * from EXAM where EXAM_DATE is not null and substr(EXAM_DATE,1,7)>='{btime}' and substr(EXAM_DATE,1,7)<='{etime}' and IMPRESSION is null ",
'检查时间在出入院时间之外': f""" select t1.* ,t2.in_time as 入院时间,t2.out_time as 出院时间 from EXAM t1,overall t2 where
( t1.EXAM_DATE is not null and t2.in_time is not null and t2.out_time is not null)
and t1.caseid = t2.caseid
and (t1.EXAM_DATE<t2.IN_TIME or t1.EXAM_DATE > t2.OUT_TIME )
and (substr(t1.EXAM_DATE,1,7)>='{btime}' and substr(t1.EXAM_DATE,1,7)<='{etime}')
""",
}
output = io.BytesIO()
writer = pd.ExcelWriter(output, engine='xlsxwriter')
for key in bus_dic.keys():
try:
temp = pd.read_sql(bus_dic[key], con=engine)
if temp.shape[0] > 0:
temp.to_excel(writer, sheet_name=key)
except:
error_df = pd.DataFrame(['明细数据获取出错'], columns=[key])
error_df.to_excel(writer, sheet_name=key)
writer.save()
data = output.getvalue()
hosName = db_con_url['hosname']
return dcc.send_bytes(data, f'{hosName}检查问题数据明细.xlsx')
else:
return dash.no_update
# # ----------------------------------------------------------------------------------------------------- Level-4 Figure 2 ----------------------------------------------------------------------------------------------------------------------
# Fetch the data for the exam level-4 figure 2
def get_fourth_level_second_fig_date(engine,btime,etime):
res = pd.read_sql(f"select EXAM_CLASS as 检查类别,count(1) as num ,substr(EXAM_DATE,1,7) as month from EXAM where EXAM_CLASS is not null and substr(EXAM_DATE,1,7)>='{btime}' and substr(EXAM_DATE,1,7)<='{etime}' group by substr(EXAM_DATE,1,7),EXAM_CLASS ",con=engine)
return res
# Update level-4 figure 2
@app.callback(
Output('exam_fourth_level_second_fig','figure'),
Output('exam_fourth_level_second_fig_data', 'data'),
Input('exam_fourth_level_second_fig_data', 'data'),
Input("db_con_url", "data"),
Input("count_time", "data"),
)
def update_exam_fourth_level_second_fig(exam_fourth_level_second_fig_data, db_con_url, count_time):
if db_con_url is None:
return dash.no_update
else:
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
if exam_fourth_level_second_fig_data is None:
exam_fourth_level_second_fig_data = {}
exam_fourth_level_second_fig = get_fourth_level_second_fig_date(engine, btime, etime)
exam_fourth_level_second_fig_data['exam_fourth_level_second_fig'] = exam_fourth_level_second_fig.to_json( orient='split', date_format='iso')
exam_fourth_level_second_fig_data['hosname'] = db_con_url['hosname']
exam_fourth_level_second_fig_data['btime'] = btime
exam_fourth_level_second_fig_data['etime'] = etime
exam_fourth_level_second_fig_data = json.dumps(exam_fourth_level_second_fig_data)
else:
exam_fourth_level_second_fig_data = json.loads(exam_fourth_level_second_fig_data)
if db_con_url['hosname'] != exam_fourth_level_second_fig_data['hosname']:
exam_fourth_level_second_fig = get_fourth_level_second_fig_date(engine, btime, etime)
exam_fourth_level_second_fig_data['exam_fourth_level_second_fig'] = exam_fourth_level_second_fig.to_json(orient='split', date_format='iso')
exam_fourth_level_second_fig_data['hosname'] = db_con_url['hosname']
exam_fourth_level_second_fig_data['btime'] = btime
exam_fourth_level_second_fig_data['etime'] = etime
exam_fourth_level_second_fig_data = json.dumps(exam_fourth_level_second_fig_data)
else:
if exam_fourth_level_second_fig_data['btime'] != btime or exam_fourth_level_second_fig_data['etime'] != etime:
exam_fourth_level_second_fig = get_fourth_level_second_fig_date(engine, btime, etime)
exam_fourth_level_second_fig_data['exam_fourth_level_second_fig'] = exam_fourth_level_second_fig.to_json(orient='split', date_format='iso')
exam_fourth_level_second_fig_data['btime'] = btime
exam_fourth_level_second_fig_data['etime'] = etime
exam_fourth_level_second_fig_data = json.dumps(exam_fourth_level_second_fig_data)
else:
exam_fourth_level_second_fig = pd.read_json( exam_fourth_level_second_fig_data['exam_fourth_level_second_fig'], orient='split')
exam_fourth_level_second_fig_data = dash.no_update
exam_fourth_level_second_fig = exam_fourth_level_second_fig.sort_values(['month'])
fig = px.bar(exam_fourth_level_second_fig, x="month", y="num", color='检查类别', color_discrete_sequence=px.colors.qualitative.Dark24)
fig.update_layout(
margin=dict(l=30, r=30, t=30, b=30),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
),
)
fig.update_yaxes(title_text="检查数量", )
fig.update_xaxes(title_text="月份", )
return fig,exam_fourth_level_second_fig_data
# # # ----------------------------------------------------------------------------------------------------- Download All ----------------------------------------------------------------------------------------------------------------------
# Download the aggregated statistics shown on this page
@app.callback(
Output("down-rout-exam-temp", "data"),
Input("rout-exam-temp-all-count-data-down", "n_clicks"),
Input("rout_exam_temp_first_level_first_fig_data", "data"),
Input("rout_exam_temp_first_level_second_fig_data", "data"),
Input("temp_second_level_first_fig_data", "data"),
Input("rout_third_level_first_fig_data", "data"),
Input("rout_third_level_second_fig_data", "data"),
Input("exam_fourth_level_first_fig_data", "data"),
Input("exam_fourth_level_second_fig_data", "data"),
Input("db_con_url", "data"),
Input("count_time", "data"),
prevent_initial_call=True,
)
def get_all_count_data(n_clicks, rout_exam_temp_first_level_first_fig_data,
rout_exam_temp_first_level_second_fig_data,
temp_second_level_first_fig_data,
rout_third_level_first_fig_data,
rout_third_level_second_fig_data,
exam_fourth_level_first_fig_data,
exam_fourth_level_second_fig_data,
db_con_url,count_time):
if db_con_url is None :
return dash.no_update
else:
if n_clicks is not None and n_clicks>0:
n_clicks = 0
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
hosName = db_con_url['hosname']
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
now_time = str(datetime.now())[0:19].replace(' ', '_').replace(':', '_')
if rout_exam_temp_first_level_first_fig_data is not None and rout_exam_temp_first_level_second_fig_data is not None and temp_second_level_first_fig_data is not None and \
rout_third_level_first_fig_data is not None and rout_third_level_second_fig_data is not None and exam_fourth_level_first_fig_data is not None and exam_fourth_level_second_fig_data is not None :
rout_exam_temp_first_level_first_fig_data = json.loads(rout_exam_temp_first_level_first_fig_data )
rout_exam_temp_first_level_second_fig_data = json.loads(rout_exam_temp_first_level_second_fig_data )
temp_second_level_first_fig_data = json.loads(temp_second_level_first_fig_data )
rout_third_level_first_fig_data = json.loads(rout_third_level_first_fig_data )
rout_third_level_second_fig_data = json.loads(rout_third_level_second_fig_data )
exam_fourth_level_first_fig_data = json.loads(exam_fourth_level_first_fig_data )
exam_fourth_level_second_fig_data = json.loads(exam_fourth_level_second_fig_data )
if rout_exam_temp_first_level_first_fig_data['hosname'] == hosName and \
rout_exam_temp_first_level_second_fig_data['hosname'] == hosName and \
temp_second_level_first_fig_data['hosname'] == hosName and temp_second_level_first_fig_data['btime'] == btime and temp_second_level_first_fig_data['etime'] == etime and \
rout_third_level_first_fig_data['hosname'] == hosName and rout_third_level_first_fig_data['btime'] == btime and rout_third_level_first_fig_data['etime'] == etime and\
rout_third_level_second_fig_data['hosname'] == hosName and rout_third_level_second_fig_data['btime'] == btime and rout_third_level_second_fig_data['etime'] == etime and \
exam_fourth_level_first_fig_data['hosname'] == hosName and exam_fourth_level_first_fig_data['btime'] == btime and exam_fourth_level_first_fig_data['etime'] == etime and \
exam_fourth_level_second_fig_data['hosname'] == hosName and exam_fourth_level_second_fig_data['btime'] == btime and exam_fourth_level_second_fig_data['etime'] == etime :
rout_exam_temp_first_level_first_fig_data = | pd.read_json( rout_exam_temp_first_level_first_fig_data['rout_exam_temp_first_level_first_fig'], orient='split') | pandas.read_json |
"""Regression Model"""
__docformat__ = "numpy"
import os
import warnings
import logging
from typing import List, Tuple, Dict, Any
import pandas as pd
from linearmodels import PooledOLS
from linearmodels.panel import (
RandomEffects,
BetweenOLS,
PanelOLS,
FirstDifferenceOLS,
compare,
)
from pandas import DataFrame
from statsmodels.api import add_constant
import statsmodels.api as sm
from statsmodels.stats.api import het_breuschpagan
from statsmodels.stats.diagnostic import acorr_breusch_godfrey
from statsmodels.stats.stattools import durbin_watson
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import export_data
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
def get_regressions_results(
regression_type: str,
regression_variables: List[Tuple],
data: Dict[str, pd.DataFrame],
datasets: Dict[pd.DataFrame, Any],
entity_effects: bool = False,
time_effects: bool = False,
) -> Tuple[DataFrame, Any, List[Any], Any]:
"""Based on the regression type, this function decides what regression to run.
Parameters
----------
regression_type: str
The type of regression you wish to execute.
regression_variables : list
        The regression variables entered, where the first variable is
        the dependent variable.
data : dict
A dictionary containing the datasets.
datasets: dict
A dictionary containing the column and dataset names of
each column/dataset combination.
entity_effects: bool
Whether to apply Fixed Effects on entities.
time_effects: bool
Whether to apply Fixed Effects on time.
Returns
-------
    The dataset used, the dependent variable, the independent variables, and
    the regression model.
"""
if regression_type == "OLS":
return get_ols(regression_variables, data, datasets, False)
if regression_type == "POLS":
return get_pols(regression_variables, data, datasets)
if regression_type == "RE":
return get_re(regression_variables, data, datasets)
if regression_type == "BOLS":
return get_bols(regression_variables, data, datasets)
if regression_type == "FE":
return get_fe(
regression_variables, data, datasets, entity_effects, time_effects
)
if regression_type == "FDOLS":
return get_fdols(regression_variables, data, datasets)
return console.print(f"{regression_type} is not an option.")
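# Example usage (added sketch; `data`, `datasets` and `regression_variables` are
# assumed to be prepared by the econometrics controller, with the dependent
# variable listed first in `regression_variables`):
#
#     dataset_used, dependent, independent, model = get_regressions_results(
#         "FE", regression_variables, data, datasets, entity_effects=True
#     )
#
# `model` is the regression object returned by the matching get_* helper for the
# chosen backend (statsmodels or linearmodels).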
def get_regression_data(
regression_variables: List[tuple],
data: Dict[str, pd.DataFrame],
datasets: Dict[pd.DataFrame, Any],
regression_type: str = "",
) -> Tuple[DataFrame, Any, List[Any]]:
"""This function creates a DataFrame with the required regression data as
well sets up the dependent and independent variables.
Parameters
----------
regression_variables : list
        The regression variables entered, where the first variable is
        the dependent variable.
data : dict
A dictionary containing the datasets.
datasets: dict
A dictionary containing the column and dataset names of
each column/dataset combination.
regression_type: str
The type of regression that is executed.
Returns
-------
    The dataset used, the dependent variable, and the independent variables.
"""
regression = {}
independent_variables = []
dependent_variable = None
for variable in regression_variables:
column, dataset = datasets[variable].keys()
regression[f"{column}_{dataset}"] = data[dataset][column]
if variable == regression_variables[0]:
dependent_variable = f"{column}_{dataset}"
elif variable in regression_variables[1:]:
independent_variables.append(f"{column}_{dataset}")
regression_df = | pd.DataFrame(regression) | pandas.DataFrame |
from py_ball import league, image, boxscore
import matplotlib.pyplot as plt
import time
import pandas as pd
import sys
import requests_cache
HEADERS = {'Host': 'stats.nba.com',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:72.0) Gecko/20100101 Firefox/72.0',
'Accept': 'application/json, text/plain, */*',
'Accept-Language': 'en-US,en;q=0.5',
'Accept-Encoding': 'gzip, deflate, br',
'x-nba-stats-origin': 'stats',
'x-nba-stats-token': 'true',
'Connection': 'keep-alive',
'Referer': 'https://stats.nba.com/',
'Pragma': 'no-cache',
'Cache-Control': 'no-cache'}
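# (Added note) stats.nba.com rejects requests that do not look like they come from a
# browser, so the py_ball calls below are made with the browser-style headers above;
# the exact header set the API expects can change over time.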
def pad_id(num):
""" pad_id adds the requisite number of leading
zeroes to a game number to form a valid game_id
@param num (int): Regular season game number
Returns:
num_str (str): Regular season game number
with leading zeroes
"""
num_str = str(num)
while len(num_str) < 4:
num_str = '0' + num_str
return num_str
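# Example (added): pad_id(7) returns '0007'; combined with the 2019-20 prefix built
# below ('002' + '19' + '0' == '002190'), the resulting game_id is '0021900007'.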
def get_year_boxscores(year):
""" get_year_boxscores pulls data from the
boxscores endpoint of the stats.nba.com API
@param year (int): year corresponding to the year
in which the season began. For example, the 2017-2018
NBA season is represented as the year 2017.
Returns:
Saves two .csv files of the player and team boxscore data to the
current working directory with a name formatted
as player_boxscores_[year].csv and team_boxscores_[year].csv
"""
requests_cache.install_cache('nba_cache_' +str(year))
year_sub = str(year)[-2:]
base = '002' + year_sub + '0'
box_score_plus = pd.DataFrame({})
game_summaries = pd.DataFrame({})
total_officals = pd.DataFrame({})
if year == 2019:
num_games = 706
elif year > 2011:
num_games = 41 * 30
elif year == 2011:
num_games = 990
elif year > 2003:
num_games = 41 * 30
elif year > 1998:
num_games = 41 * 29
elif year == 1998:
num_games = 25 * 29
elif year > 1994:
num_games = 41 * 29
elif year > 1989:
num_games = 41 * 27
elif year > 1987:
num_games = 41 * 25
else:
num_games = 41 * 23
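    # (Added note) num_games is the total number of league games in the season:
    # 41 home games per team times the number of franchises (30/29/27/25/23 across
    # eras), 990 for the 66-game 2011-12 lockout season, 725 (25 * 29) for the
    # 50-game 1998-99 lockout season, and 706 as the slice of the interrupted
    # 2019-20 season that this script pulls.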
for x in range(1, num_games + 1):
print(str(year) + ': Game #' + str(x))
game_id_here = base + pad_id(x)
t0 = time.time()
success = 0
counter = 0
while ((not success) and (counter < 10)):
try:
trad = boxscore.BoxScore(headers=HEADERS, game_id=game_id_here, endpoint='boxscoretraditionalv2')
time.sleep(2)
if (year > 1995):
ff = boxscore.BoxScore(headers=HEADERS, game_id=game_id_here, endpoint='boxscorefourfactorsv2')
time.sleep(2)
misc = boxscore.BoxScore(headers=HEADERS, game_id=game_id_here, endpoint='boxscoremiscv2')
time.sleep(2)
summary = boxscore.BoxScore(headers=HEADERS, game_id=game_id_here, endpoint='boxscoresummaryv2')
success = 1
except:
print("Trying again")
time.sleep(2)
success = 0
counter += 1
if counter == 10:
continue
box_score_headers = [
'GAME_ID', 'TEAM_ID', 'TEAM_ABBREVIATION', 'TEAM_CITY',
'PLAYER_ID', 'PLAYER_NAME', 'START_POSITION', 'COMMENT',
'MIN', 'FGM', 'FGA', 'FG_PCT', 'FG3A', 'FG3M', 'FG3_PCT',
'FTA', 'FTM', 'FT_PCT', 'DREB', 'OREB', 'REB',
'AST', 'STL', 'BLK', 'TO', 'PF', 'PTS', 'PLUS_MINUS'
]
player_box = pd.DataFrame(trad.data['PlayerStats'])
player_box = player_box[box_score_headers]
new_player_box = player_box.sort_values(['TEAM_CITY','PLAYER_ID'])
if (year > 1995):
ff_headers = [
'EFG_PCT', 'FTA_RATE', 'OPP_EFG_PCT', 'OPP_FTA_RATE',
'OPP_OREB_PCT', 'OPP_TOV_PCT', 'OREB_PCT', 'TM_TOV_PCT']
new_ff = pd.DataFrame(ff.data['sqlPlayersFourFactors']).sort_values(['TEAM_CITY','PLAYER_ID'])[ff_headers]
misc_headers = ['BLKA', 'OPP_PTS_2ND_CHANCE',
'OPP_PTS_FB', 'OPP_PTS_OFF_TOV', 'OPP_PTS_PAINT', 'PFD',
'PTS_2ND_CHANCE', 'PTS_FB', 'PTS_OFF_TOV',
'PTS_PAINT']
new_misc = pd.DataFrame(misc.data['sqlPlayersMisc']).sort_values(['TEAM_CITY','PLAYER_ID'])[misc_headers]
new_box = pd.concat([new_player_box, new_ff, new_misc], axis=1)
else:
new_box = new_player_box
game_summary = pd.DataFrame(summary.data['GameSummary'])
game_info = pd.DataFrame(summary.data['GameInfo'])
new_summary = pd.concat([game_summary,
game_info],
axis=1)
new_summary['HOME_TEAM_ID'] = new_summary['HOME_TEAM_ID'].apply(str)
new_summary['VISITOR_TEAM_ID'] = new_summary['VISITOR_TEAM_ID'].apply(str)
officals = pd.DataFrame(summary.data['Officials'])
officals['GAMECODE'] = | pd.DataFrame(game_summary) | pandas.DataFrame |