| prompt (string, length 19–1.03M) | completion (string, length 4–2.12k) | api (string, length 8–90) |
|---|---|---|
#!/usr/local/bin/python
""" Mess-around project to learn more python.
I organize my card collection according to price. Cards worth $TRADE_BOX_THRESHOLD or more are kept in my trade box. I adjust this threshold whenever the trade box gets full.
Cards worth $1-$TRADE_BOX_THRESHOLD are kept in a separate, less-accessed box. Anything under $1 is kept as "bulk." Since prices change, it's a bit of a pain to check each card's value.
This program will compare current prices to an older version of the inventory and produce these reports:
Report 1) Cards that dropped from trade box, $TRADE_BOX_THRESHOLD->$1; $TRADE_BOX_THRESHOLD->bulk w/ gross delta; by color, alphabetized
Report 2) Cards that changed from dollar box, $1->$TRADE_BOX_THRESHOLD; $1->bulk w/gross delta; by color, alphabetized
Report 3) Cards that increased from bulk box, bulk->$1; bulk->$TRADE_BOX_THRESHOLD w/gross delta; by color, alphabetized
Overall change in value, any notes
So I have to export from deckbox.org, then compare that export to the older CSVs
Export doesn't have color, so I have to compare to a library fetched from mtgjson.org; if the library isn't found then it probably means
I need to update it from mtgjson.org
I'll schedule this to run every month or so
Goals: learn csv with pandas, http stuff with requests, json stuff
Thanks to - https://deckbox.org/help/tooltips tooltip library
TODO - figure out how much change is from new stuff; this is interesting for a different reason: organic growth vs. adds
TODO - little graphs showing change from month to month visually
TODO - heatmap of change by set
TODO - heatmap of absolute and average value by set
Pass in "--debug" for additional log output
"""
import datetime
import io
import json
import logging
import numpy
import os
import pandas
import platform
import re
import requests
import shutil
import smtplib
import sys
import zipfile
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from operator import itemgetter
from pathlib import Path
from timeit import default_timer as timer
MAGIC_CARD_JSON_URL = "https://mtgjson.com/api/v5/AtomicCards.json.zip"
DATA_DIR_NAME = "data/"
RUN_LOG_FILE_NAME = DATA_DIR_NAME + "run-log.json"
CONFIG_FILE_NAME = "config.json"
COOKIE_FILE_NAME = "cookies.json"
TRADE_BOX_THRESHOLD = 10 # this might change, but it's this for now
BULK_BOX_THRESHOLD = 3 # used to be a dollar, but some buyers said anything less than $4 is bulk
CURRENT_VERSION = "0.0.20"
HOST_NAME = platform.node()
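# Illustration only -- this helper is not part of the original script. It is a
# minimal sketch, assuming the two thresholds above, of how a card's current
# price maps onto the three storage buckets described in the module docstring.
def boxForPrice(price):
    """Return which box a card belongs in, given its price in dollars."""
    if price >= TRADE_BOX_THRESHOLD:
        return "trade"   # valuable cards kept in the trade box
    if price >= BULK_BOX_THRESHOLD:
        return "dollar"  # mid-value cards in the less-accessed box
    return "bulk"        # everything else is bulk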
def makeCookies(cookies):
"""write a dictionary of http cookies to a local file"""
with open(COOKIE_FILE_NAME, "w") as file:
json.dump(cookies, file)
return
def eatCookies():
"""read a dictionary of http cookies from a local file"""
with open(COOKIE_FILE_NAME, "r") as file:
return json.load(file)
def getCardLibrary(libFile):
"""go get a card json library, write it to disk, unzip it and return it as a Path"""
# keep the zip too so we can compare byte size for updates
debug("in getCardLib:" + str(libFile))
response = requests.get(MAGIC_CARD_JSON_URL, stream=True)
bytes = io.BytesIO(response.content)
with open(str(libFile.with_suffix(".zip")), "wb") as file:
file.write(bytes.read(-1))
zip = zipfile.ZipFile(bytes)
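# Note: ZipFile can read from the same in-memory buffer even after the read(-1)
# above, because it performs its own seeks to locate the archive's central
# directory, so no explicit seek(0) is needed here.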
zip.extractall(str(libFile.parent))
return libFile
def cleanCardDataFrame(df):
"""clean up and prep the data frame, remove unnecessary columns, change formats
expects a DataFrame in, returns a cleaned DataFrame back"""
# remove all the columns I don't need
listColumnsIDontNeed = {"Type", "Rarity", "Language", "Signed",
"Artist Proof", "Altered Art", "Misprint", "Promo", "Textless", "My Price"}
for colName in listColumnsIDontNeed:
if colName in df.columns:
del df[colName]
# convert price to a number (don't care about dollar sign)
df["Price"] = df["Price"].str.replace("$", "").str.replace(",", "").astype(float)
# should be fewer columns now, and price should be a float
# df.info()
return df
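# For example, a Price column containing the strings ["$1,234.56", "$0.25"]
# comes out of cleanCardDataFrame as the floats [1234.56, 0.25].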
def readRunLog():
"""read the runLog, runlog has when-run (YYYYMMDDHHMMSS), old-file, new-file
why json? because I want to be able to sort and add elements and hierarchies and stuff if I want, and I'm trying to work more with json"""
debug("reading the log")
dictRunLog = {}
if Path(RUN_LOG_FILE_NAME).exists():
with open(RUN_LOG_FILE_NAME, "r") as file:
dictRunLog = json.load(file)
debug("current run log: " + str(dictRunLog))
return dictRunLog
def writeRunLog(strTimestampKey, dictLogEntry):
"""write out the runLog, runlog has when-run (YYYYMMDDHHMMSS), old-file, new-file
overwriting this file every time kind of worries me, so I'm going to read the current file, merge what is passed over it, and write the combined result back out"""
debug("writing the log")
dictRunLog = readRunLog()
dictRunLog[strTimestampKey] = dictLogEntry
with open(RUN_LOG_FILE_NAME, "w") as file:
json.dump(dictRunLog, file, default=default_numpy)
def default_numpy(o):
if isinstance(o, (numpy.int64, numpy.int32)):
return int(o)
raise TypeError("Can't understand the object type <" + str(type(o)) + "> for object " + str(o))
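# json.dump cannot serialize numpy scalar types on its own, which is why
# default_numpy is passed as the default= hook when writing the run log above.
# For example, json.dumps({"n": numpy.int64(3)}) raises a TypeError, while
# json.dumps({"n": numpy.int64(3)}, default=default_numpy) returns '{"n": 3}'.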
def determineCompareFile(dictRunLog):
"""figure out what the right file is to compare current file to, pass in fun file dict, return a file that exists in data
the compare file should be the oldest, or the "new-file" from the last run log"""
# sort run log by old-file
dictRunLog = sorted(dictRunLog.items(), key=itemgetter(0))
runLogSize = len(dictRunLog)
# print("size run log: " + str(runLogSize)+ str(dictRunLog) + "::::" + str(dictRunLog[runLogSize-1][1]["old-file"]))
lastCompared = None
lastNew = None
# get the last item in the run log to find the last old-file
if runLogSize > 0:
lastCompared = dictRunLog[runLogSize - 1][1]["old-file"]
lastNew = dictRunLog[runLogSize - 1][1]["new-file"]
debug("LastCompared: " + str(lastCompared))
debug("LastNewFile: " + str(lastNew))
# find all the csvs in data/
listCardsCSVs = list(filter(lambda x: str(x).endswith(
"magic-cards.csv"), os.listdir(DATA_DIR_NAME)))
# sort them all, oldest to newest by file name
listCardsCSVs = sorted(listCardsCSVs)
debug("listCardsCSVs:" + str(len(listCardsCSVs)) + str(listCardsCSVs))
# find the lastCompared in list, default to -1 if no match
indexLastCompared = None
indexLastNew = None
try:
indexLastCompared = listCardsCSVs.index(lastCompared)
indexLastNew = listCardsCSVs.index(lastNew)
except ValueError:
indexLastCompared = -1
indexLastNew = -1
debug("indexLastCompared: (-1 means I've never compared this file) " + str(indexLastCompared))
debug("indexLastNew: (-1 means I've never compared this file) " + str(indexLastNew))
# if there's no last new match in the directory, use the oldest
if (indexLastNew == -1):
toCompareFileName = listCardsCSVs[0]
else:
toCompareFileName = lastNew
# Check the next card csv file up chronologically
# indexToCompare = indexLastCompared+1
# there should not be a situation where there's not a file after the last compared, but if so, just run against the latest file
# if (indexToCompare >= len(listCardsCSVs)):
# indexToCompare = len(listCardsCSVs)-1
# debug("indexToCompare" + str(indexToCompare))
# toCompareFileName = listCardsCSVs[indexToCompare]
# print("toCompareFileName: " + str(toCompareFileName))
return toCompareFileName
def configure():
"""load and return configuration dictionary from JSON and config logging"""
configureLogging()
with open(CONFIG_FILE_NAME, "r") as file:
return json.load(file)
def configureLogging():
"""set up the logging for printing to the console stream"""
logger = logging.getLogger(__name__)
print("Checking arguments, if --debug sent in as argument, then debug log level" + str(sys.argv))
if (len(sys.argv) > 1):
logLevel = sys.argv[1]
if str(logLevel).endswith("debug"):
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler())
print(str(logLevel))
debug("Debug level: " + str(logger.getEffectiveLevel()))
def debug(msg):
"""log a debug message so I don't have to type getLogger... a million times."""
logger = logging.getLogger(__name__)
logger.debug(msg)
def makeMushedKey(row):
""""Return a unique key based on a row, sortcat+name+edition+condition+foil+cardNumber"""
return row["SortCategory"] + "-" + row["Name"] + "-" + row["Edition"] + "-" + row["Condition"] + "-" + str(row["CardNumber"]) + "-" + str(row["IsFoil"])
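# Illustrative example only (the values are made up, not from my inventory):
# a near-mint foil row might produce a key such as
# "Red-Lightning Bolt-Magic 2011-Near Mint-146-True".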
def updateRowStats(row, dictStats):
"""Update count/price stats for a row; deltas are calculated from the stats dictionary with mushedKeys as key"""
key = makeMushedKey(row)
oldCount = 0.0
if | pandas.notnull(row["OldCount"]) | pandas.notnull |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019-05-22 17:45
# @Author : erwin
import datetime
import time
import pandas as pd
from component.demo_opentsdb.opentsdb_conn import OpenTSDBClient
from machine_learning.similarity.dtw.hierarchical_helper import HierarchicalHelper
from common.pickle_helper import store_model
pd.set_option('display.max_columns', 1000)
pd.set_option('display.width', 1000)
| pd.set_option('display.max_colwidth', 1000) | pandas.set_option |
#!/usr/bin/env python
# ipdb> import os; os._exit(1)
# call as: python convert_mat_to_excel.py
# =======================================
# Version 0.1
# 30 March, 2019
# michael.taylor AT reading DOT ac DOT uk
# =======================================
import os
import os.path
import glob
import optparse
from optparse import OptionParser
import sys
import numpy as np
from scipy.io import loadmat
import xarray
import pandas as pd
from pandas import Series, DataFrame, Panel
import datetime as dt
from sklearn.preprocessing import StandardScaler
import seaborn as sns; sns.set(style="darkgrid")
import matplotlib.pyplot as plt; plt.close("all")
import matplotlib.dates as mdates
import matplotlib.ticker as ticker
import urllib
def create_dataframe(file_in):
#
# LOAD MATLAB ARRAY
#
mat_dict = loadmat(file_in, squeeze_me=True)
mat_keys = sorted( mat_dict.keys() )
py_dict = { k: mat_dict[k] for k in mat_keys}
# ---------------------------
# INPUTS p_sim2 (1685790, 10)
# ---------------------------
# (1) single1: irradiance @ 305 nm [Ir1]
# (2) single2: irradiance @ 312 nm [Ir2]
# (3) single3: irradiance @ 320 nm [Ir3]
# (4) single4: irradiance @ 340 nm [Ir4]
# (5) single5: irradiance @ 380 nm [Ir5]
# (6) solar zenith angle [SZA]
# (7) day of the year [DOY]
# (8) [Cos(DOY)]
# (9) [Sin(DOY)]
# (10) day of the week (DOW)
# ---------------------------
# TIME t_sim2 (1685790, 1)
# ---------------------------
# OUTPUTS y_sim2 (1685790, 6)
# ---------------------------
# (1) Ir1: vitamin D with action spectrum 1 [VitD(AS1)]
# (2) Ir2: vitamin D with action spectrum 2 [VitD(AS2)]
# (3) vitamin D with action spectrum from Ilias [VitD(AS3)]
# (4) DNA damage effective dose (DNAD)
# (5) CIE erythemal dose (CIE)
# (6) Plant Growth (PG)
# ---------------------------
# EXTRACT INPUTS:
x = py_dict['p_sim2'].T
# EXTRACT OUTPUTS:
y = py_dict['y_sim2'].T
# CONVERT MATLAB DATETIME TO PANDAS DATETIME:
# NB: 719529 is the Matlab datenum value of the Unix epoch start (1970-01-01)
t = pd.to_datetime(py_dict['t_sim2'] - 719529, unit='D')
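# Quick sanity check of the conversion (hypothetical value, not from the data):
# Matlab datenum 737791 is 18262 days after the Unix epoch, so
# pd.to_datetime(737791 - 719529, unit='D') gives Timestamp('2020-01-01').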
# SELECT SAMPLE:
x = x[0:10000,:]
y = y[0:10000,:]
t = t[0:10000]
# PANDAS COLUMN HEADINGS:
x_cols = ['Ir_305','Ir_312','Ir_320','Ir_340','Ir_380','SZA','DOY','CosDOY','SinDOY','DOW']
y_cols = ['VitaminD_AS1','VitaminD_AS2','VitaminD_AS3','DNA_Damage','CIE','Plant_Growth']
cols = np.append(x_cols,y_cols)
# MERGE INPUTS & OUTPUTS INTO PANDAS DATAFRAME:
df1 = | pd.DataFrame(x, columns=x_cols, index=t) | pandas.DataFrame |
import pandas as pd
import numpy as np
import sys, getopt
import os
from os import path
import collections
def process_kp_baseline_survey(data_dictionary_filename, data_filename, output_folder):
print('input dd =', data_dictionary_filename)
print('input df =', data_filename)
print('output dir =', output_folder)
#--------------------------------------------------------------------------------
#Process data dictionary for KPDataDictionaries
dd = pd.read_csv(data_dictionary_filename)
dd = dd[['Variable Name', 'Type', 'Label', 'Valid Values (if not included in label)']]
dd = dd.rename(columns={'Variable Name': 'ElementName',
'Type': 'DataType',
'Label': 'ElementDescription',
'Valid Values (if not included in label)':'Notes'})
dd['ElementName'] = dd['ElementName'].str.lower()
replace_names = {'studyid': 'study_id',
'dem_1': 'age',
'dem_2': 'gender'}
for k,v in replace_names.items():
dd['ElementName'] = dd['ElementName'].replace(k,v)
dd['DataType'] = dd['DataType'].replace('integer','Integer')
dd['DataType'] = dd['DataType'].replace('boolean','Boolean')
for i, x in enumerate(dd["Notes"].values):
if str(x).lower().find("1,") >= 0:
dd['DataType'].values[i] = "Categorical"
dd['DataType'] = dd['DataType'].replace('','Integer')
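# In other words (my reading of the block above): columns whose Notes field
# enumerates coded values (anything containing "1,") are marked Categorical,
# and any remaining blank DataType falls back to Integer.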
dd = dd[dd['DataType'] != 'text']
dd.insert(2, 'Size', '')
dd.insert(3, 'Required', 'Required')
dd.insert(5, 'ValueRange', '')
dd.insert(7, 'Aliases', '')
dd = dd.dropna(subset=['ElementName'])
#--------------------------------------------------------------------------------
#Process data for KPData
df = pd.read_csv(data_filename)
df.columns = df.columns.str.strip().str.lower()
df = df.rename(columns=replace_names)
column_names = dd['ElementName'].values
df = df[column_names]
for name in list(df.columns):
if str(name).lower().find('trait') != -1:
df[name] = df[name].fillna(4.0)
df[name] = df[name].astype(float)
df[name] = df[name].replace(1.0,'1 = Disagree strongly')
df[name] = df[name].replace(2.0,'2 = Disagree moderately')
df[name] = df[name].replace(3.0,'3 = Disagree a little')
df[name] = df[name].replace(4.0,'4 = Neither agree nor disagree')
df[name] = df[name].replace(5.0,'5 = Agree a little')
df[name] = df[name].replace(6.0,'6 = Agree moderately')
df[name] = df[name].replace(7.0,'7 = Agree strongly')
#--------------------------------------------------------------------------------
#Create new csv files
output_data_dictionary = 'kp-baseline-survey.csv'
output_data = 'kp-baseline-survey-data.csv'
output_data_dictionary_path = os.path.join(output_folder, output_data_dictionary)
output_data_path = os.path.join(output_folder, output_data)
dd.to_csv(output_data_dictionary_path, index=False)
df.to_csv(output_data_path, index=False)
print('dd shape =', dd.shape)
print('df shape =', df.shape)
print('dd output =', output_data_dictionary)
print('df output =', output_data)
#--------------------------------------------------------------------------------
#Create TIPI scores
df_data = {}
for name in list(df.columns):
if str(name).lower().find('trait') != -1:
df_data[name] = df[name]
reverse_map = { '1 = Disagree strongly' : '7 = Agree strongly',
'2 = Disagree moderately' : '6 = Agree moderately',
'3 = Disagree a little' : '5 = Agree a little',
'4 = Neither agree nor disagree' : '4 = Neither agree nor disagree',
'5 = Agree a little' : '3 = Disagree a little',
'6 = Agree moderately' : '2 = Disagree moderately',
'7 = Agree strongly' : '1 = Disagree strongly' }
def reverse(df):
df = df.apply(lambda x: reverse_map[x])
return df
df_tipi = {}
df_tipi['Extraversion'] = [df_data['trait_1'].values, reverse(df_data['trait_6']).values]
df_tipi['Agreeableness'] = [reverse(df_data['trait_2']).values, df_data['trait_7'].values]
df_tipi['Conscientiousness'] = [df_data['trait_3'].values, reverse(df_data['trait_8']).values]
df_tipi['Emotional Stability'] = [reverse(df_data['trait_4']).values, df_data['trait_9'].values]
df_tipi['Openness to Experience'] = [df_data['trait_5'].values, reverse(df_data['trait_10']).values]
tipi_scores = {}
for k,v in df_tipi.items():
item0 = [int(str(x).split('=')[0]) for x in v[0]]
item1 = [int(str(x).split('=')[0]) for x in v[1]]
tipi_scores[k] = [(x+y)/2 for x, y in zip(item0, item1)]
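# TIPI scoring as implemented above: each trait is the mean of one normally
# scored item and one reverse-scored item, and the split('=') pulls the numeric
# prefix back out of the labelled responses. For example, trait_1 = '5 = Agree
# a little' and trait_6 = '3 = Disagree a little' (reversed to '5 = Agree a
# little') give an Extraversion score of (5 + 5) / 2 = 5.0.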
tipi_scores = pd.DataFrame(tipi_scores).set_index(df['study_id'])
tipi_scores_filename = 'kp-baseline-survey-tipi.csv'
tipi_scores_path = os.path.join(output_folder, tipi_scores_filename)
tipi_scores.to_csv(tipi_scores_path)
print('TIPI scores =', tipi_scores_filename)
#--------------------------------------------------------------------------------
#Create IPAQ scores
ipaq_data = collections.defaultdict(list)
for name in list(df.columns):
if str(name).lower().find('ipaq') != -1:
if ((str(name).lower().find('_none') == -1) and
(str(name).lower().find('_dk') == -1) and
(str(name).lower().find('ipaq_4') == -1)):
for item in df[name].values:
if str(item).lower() == 'nan':
ipaq_data[name].append(0)
else:
item = str(item).replace('-','')
item = float(item)
ipaq_data[name].append(item)
ipaq_data[name] = np.array(ipaq_data[name])
MOVA = ipaq_data['ipaq_1'] * (ipaq_data['ipaq_1_hr'] * 60. + ipaq_data['ipaq_1_min'])
MOMA = ipaq_data['ipaq_2'] * (ipaq_data['ipaq_2_hr'] * 60. + ipaq_data['ipaq_2_min'])
MOW = ipaq_data['ipaq_3'] * (ipaq_data['ipaq_3_hr'] * 60. + ipaq_data['ipaq_3_min'])
MOTO = MOVA + MOMA + MOW
MMVA = MOVA + MOMA
MMAE = 2. * MOVA + MOMA
MMET = 8. * MOVA + 4. * MOMA + 3.3 * MOW
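# The 8 / 4 / 3.3 weights match the usual IPAQ MET values assigned to vigorous
# activity, moderate activity and walking respectively (my reading of the
# coefficients; the source doesn't state this explicitly).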
ipaq_scores = {'MOVA':MOVA, 'MOMA':MOMA, 'MOW':MOW, 'MOTO':MOTO, 'MMVA':MMVA, 'MMAE':MMAE, 'MMET':MMET}
ipaq_scores = | pd.DataFrame(ipaq_scores) | pandas.DataFrame |
"""
Monte Carlo-type tests for the BM model
Note that the actual tests that run are just regression tests against
previously estimated values with small sample sizes that can be run quickly
for continuous integration. However, this file can be used to re-run (slow)
large-sample Monte Carlo tests.
"""
import numpy as np
import pandas as pd
import pytest
from numpy.testing import assert_allclose
from scipy.signal import lfilter
from statsmodels.tsa.statespace import (
dynamic_factor_mq, sarimax, varmax, dynamic_factor)
def simulate_k_factor1(nobs=1000):
mod_sim = dynamic_factor.DynamicFactor(np.zeros((1, 4)), k_factors=1,
factor_order=1, error_order=1)
loadings = [1.0, -0.75, 0.25, -0.3, 0.5]
p = np.r_[loadings[:mod_sim.k_endog],
[10] * mod_sim.k_endog,
0.5,
[0.] * mod_sim.k_endog]
ix = pd.period_range(start='1935-01', periods=nobs, freq='M')
endog = pd.DataFrame(mod_sim.simulate(p, nobs), index=ix)
true = pd.Series(p, index=mod_sim.param_names)
# Compute levels series (M and Q)
ix = pd.period_range(start=endog.index[0] - 1, end=endog.index[-1],
freq=endog.index.freq)
levels_M = 1 + endog.reindex(ix) / 100
levels_M.iloc[0] = 100
levels_M = levels_M.cumprod()
log_levels_M = np.log(levels_M) * 100
log_levels_Q = (np.log(levels_M).resample('Q', convention='e')
.sum().iloc[:-1] * 100)
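# The block above turns the simulated percent growth rates back into a level
# index (a cumulative product of gross rates anchored at 100), takes 100 * log
# of it, and aggregates the monthly log-levels to quarterly by summing within
# each quarter; iloc[:-1] drops the final, possibly incomplete, quarter.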
# This is an alternative way to compute the quarterly levels
# endog_M = endog.iloc[:, :3]
# x = endog.iloc[:, 3:]
# endog_Q = (x + 2 * x.shift(1) + 3 * x.shift(2) + 2 * x.shift(3) +
# x.shift(4)).resample('Q', convention='e').last().iloc[:-1] / 3
# levels_Q = 1 + endog.iloc[:, 3:] / 100
# levels_Q.iloc[0] = 100
# Here is another alternative way to compute the quarterly levels
# weights = np.array([1, 2, 3, 2, 1])
# def func(x, weights):
# return np.sum(weights * x)
# r = endog_M.rolling(5)
# (r.apply(func, args=(weights,), raw=False).resample('Q', convention='e')
# .last().iloc[:-1].tail())
# Compute the growth rate series that we'll actually run the model on
endog_M = log_levels_M.iloc[:, :3].diff()
endog_Q = log_levels_Q.iloc[:, 3:].diff()
return endog_M, endog_Q, log_levels_M, log_levels_Q, true
def simulate_k_factors3_blocks2(nobs=1000, idiosyncratic_ar1=False):
# Simulate the first two factors
ix = pd.period_range(start='2000-01', periods=1, freq='M')
endog = pd.DataFrame(np.zeros((1, 2)), columns=['f1', 'f2'], index=ix)
mod_f_12 = varmax.VARMAX(endog, order=(1, 0), trend='n')
params = [0.5, 0.1, -0.2, 0.9, 1.0, 0, 1.0]
f_12 = mod_f_12.simulate(params, nobs)
# Simulate the third factor
endog = pd.Series([0], name='f3', index=ix)
mod_f_3 = sarimax.SARIMAX(endog, order=(2, 0, 0))
params = [0.7, 0.1, 1.]
f_3 = mod_f_3.simulate(params, nobs)
# Combine the factors
f = pd.concat([f_12, f_3], axis=1)
# Observed variables
k_endog = 8
design = np.zeros((k_endog, 3))
design[0] = [1.0, 1.0, 1.0]
design[1] = [0.5, -0.8, 0.0]
design[2] = [1.0, 0.0, 0.0]
design[3] = [0.2, 0.0, -0.1]
design[4] = [0.5, 0.0, 0.0]
design[5] = [-0.2, 0.0, 0.0]
design[6] = [1.0, 1.0, 1.0]
design[7] = [-1.0, 0.0, 0.0]
rho = np.array([0.5, 0.2, -0.1, 0.0, 0.4, 0.9, 0.05, 0.05])
if not idiosyncratic_ar1:
rho *= 0.0
eps = [lfilter([1], [1, -rho[i]], np.random.normal(size=nobs))
for i in range(k_endog)]
endog = (design @ f.T).T + eps
endog.columns = [f'y{i + 1}' for i in range(k_endog)]
# True parameters
tmp1 = design.ravel()
tmp2 = np.linalg.cholesky(mod_f_12['state_cov'])
tmp3 = rho if idiosyncratic_ar1 else []
true = np.r_[
tmp1[tmp1 != 0],
mod_f_12['transition', :2, :].ravel(),
mod_f_3['transition', :, 0],
tmp2[np.tril_indices_from(tmp2)],
mod_f_3['state_cov', 0, 0],
tmp3,
[1] * k_endog
]
# Compute levels series (M and Q)
ix = pd.period_range(endog.index[0] - 1, endog.index[-1], freq='M')
levels_M = 1 + endog.reindex(ix) / 100
levels_M.iloc[0] = 100
levels_M = levels_M.cumprod()
log_levels_M = np.log(levels_M) * 100
log_levels_Q = (np.log(levels_M).resample('Q', convention='e')
.sum().iloc[:-1] * 100)
# Compute the growth rate series that we'll actually run the model on
endog_M = log_levels_M.iloc[:, :7].diff().iloc[1:]
endog_Q = log_levels_Q.iloc[:, 7:].diff().iloc[2:]
# Specification
factor_names = np.array(['global', 'second', 'third'])
factors = {endog.columns[i]: factor_names[design[i] != 0]
for i in range(k_endog)}
factor_orders = {
('global', 'second'): 1,
'third': 2,
}
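# My reading of the specification above: `factors` maps each observed series to
# the named factors that load on it (taken from the nonzero design entries),
# while `factor_orders` sets the lag order of each factor block -- 'global' and
# 'second' share a VAR(1) block and 'third' gets its own AR(2) block, matching
# how the factors were simulated.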
return (endog_M, endog_Q, log_levels_M, log_levels_Q, factors,
factor_orders, true, f)
@pytest.mark.skip(reason="Monte carlo test, very slow, kept for manual runs")
def test_k_factor1(reset_randomstate):
# Fitted parameters for np.random.seed(1234) replicate the true parameters
# pretty well (flipped signs on loadings are just from usual factor sign
# identification issue):
# True Fitted
# loading.0->0 1.00 -0.98
# loading.0->1 -0.75 0.75
# loading.0->2 0.25 -0.24
# loading.0->3 -0.30 0.31
# L1.0->0 0.50 0.50
# sigma2.0 10.00 10.07
# sigma2.1 10.00 10.06
# sigma2.2 10.00 9.94
# sigma2.3 10.00 11.60
np.random.seed(1234)
endog_M, endog_Q, _, _, true_params, _ = simulate_k_factor1(nobs=100000)
mod = dynamic_factor_mq.DynamicFactorMQ(
endog_M, endog_quarterly=endog_Q, factors=1, factor_orders=1,
idiosyncratic_ar1=False)
# Fit the model with L-BFGS. Because the model doesn't impose identifying
# assumptions on the factors, here we force identification by fixing the
# factor error variance to be unity
with mod.fix_params({'fb(0).cov.chol[1,1]': 1.}):
mod.fit(method='lbfgs', disp=False)
def gen_k_factor1_nonstationary(nobs=1000, k=1, idiosyncratic_ar1=False,
idiosyncratic_var=0.4, k_ar=1):
# Simulate univariate random walk
ix = pd.period_range(start='1950-01', periods=1, freq='M')
faux = pd.Series([0], index=ix)
mod = sarimax.SARIMAX(faux, order=(k_ar, 0, 0), initialization='diffuse')
params = np.r_[[0] * (k_ar - 1), [1.0001], 1.0]
factor = mod.simulate(params, nobs)
if idiosyncratic_ar1:
mod_idio = sarimax.SARIMAX(faux, order=(1, 0, 0))
endog = pd.concat([
factor + mod_idio.simulate([0.7, idiosyncratic_var], nobs)
for i in range(2 * k)], axis=1)
else:
endog = pd.concat([
factor + np.random.normal(scale=idiosyncratic_var**0.5, size=nobs)
for i in range(2 * k)], axis=1)
# Construct the quarterly variable
levels_M = 1 + endog / 100
levels_M.iloc[0] = 100
levels_M = levels_M.cumprod()
log_levels_M = np.log(levels_M) * 100
log_levels_Q = (np.log(levels_M).resample('Q', convention='e')
.sum().iloc[:-1] * 100)
# Compute the growth rate series that we'll actually run the model on
endog_M = log_levels_M.diff().iloc[1:, :k]
if k > 1:
endog_M.columns = ['yM%d_f1' % (i + 1) for i in range(k)]
else:
endog_M.columns = ['yM_f1']
endog_Q = log_levels_Q.diff().iloc[1:, k:]
if k > 1:
endog_Q.columns = ['yQ%d_f1' % (i + 1) for i in range(k)]
else:
endog_Q.columns = ['yQ_f1']
return endog_M, endog_Q, factor
def test_em_nonstationary(reset_randomstate):
# Test that when the EM algorithm estimates non-stationary parameters, that
# it warns the user and switches to a diffuse initialization.
ix = pd.period_range(start='2000', periods=20, freq='M')
endog_M = pd.Series(np.arange(20), index=ix)
endog_M.iloc[10:12] += [0.4, -0.2] # add in a little noise
ix = pd.period_range(start='2000', periods=5, freq='Q')
endog_Q = pd.Series(np.arange(5), index=ix)
mod = dynamic_factor_mq.DynamicFactorMQ(
endog_M, endog_quarterly=endog_Q, idiosyncratic_ar1=False,
standardize=False, factors=['global'])
msg = ('Non-stationary parameters found at EM iteration 1, which is not'
' compatible with stationary initialization. Initialization was'
r' switched to diffuse for the following: \["factor block:'
r' \(\'global\',\)"\], and fitting was restarted.')
with pytest.warns(UserWarning, match=msg):
return mod.fit(maxiter=2, em_initialization=False)
def gen_k_factor1(nobs=10000, k=1, idiosyncratic_ar1=False,
idiosyncratic_var=0.4, k_ar=6):
# Simulate univariate AR(6)
ix = pd.period_range(start='1950-01', periods=1, freq='M')
faux = pd.Series([0], index=ix)
mod = sarimax.SARIMAX(faux, order=(k_ar, 0, 0))
params = np.r_[[0] * (k_ar - 1), [0.5], 1.0]
factor = mod.simulate(params, nobs)
if idiosyncratic_ar1:
mod_idio = sarimax.SARIMAX(faux, order=(1, 0, 0))
endog = pd.concat([
factor + mod_idio.simulate([0.7, idiosyncratic_var], nobs)
for i in range(2 * k)], axis=1)
else:
endog = pd.concat([
factor + np.random.normal(scale=idiosyncratic_var**0.5, size=nobs)
for i in range(2 * k)], axis=1)
# Construct the quarterly variable
levels_M = 1 + endog / 100
levels_M.iloc[0] = 100
levels_M = levels_M.cumprod()
log_levels_M = np.log(levels_M) * 100
log_levels_Q = (np.log(levels_M).resample('Q', convention='e')
.sum().iloc[:-1] * 100)
# Compute the growth rate series that we'll actually run the model on
endog_M = log_levels_M.diff().iloc[1:, :k]
if k > 1:
endog_M.columns = ['yM%d_f1' % (i + 1) for i in range(k)]
else:
endog_M.columns = ['yM_f1']
endog_Q = log_levels_Q.diff().iloc[1:, k:]
if k > 1:
endog_Q.columns = ['yQ%d_f1' % (i + 1) for i in range(k)]
else:
endog_Q.columns = ['yQ_f1']
return endog_M, endog_Q, factor
@pytest.mark.filterwarnings("ignore:Log-likelihood decreased")
def test_k_factor1_factor_order_6(reset_randomstate):
# This tests that the model is correctly set up when the lag order of the
# factor is longer than 5 and we have a single factor. This is important
# because 5 lags are always present when there is quarterly data, but we
# want to check that, for example, we haven't accidentally relied on there
# being exactly 5 lags available.
# Note: as of 2020/07/25, the FRBNY code does not seem to work for 6 lags,
# so we can't test against their code
# Note: the case with only 100 nobs leads to issues with the EM algorithm
# and a decrease in the log-likelihood
# A description of the results from a run with nobs=10000, which gives a
# better indication of the model finding the correct parameters, is included
# in the comments below.
endog_M, endog_Q, _ = gen_k_factor1(
nobs=100, idiosyncratic_var=0.0)
# Construct and fit the model
mod = dynamic_factor_mq.DynamicFactorMQ(
endog_M, endog_quarterly=endog_Q,
factor_orders=6,
idiosyncratic_ar1=False, standardize=False)
mod.fit()
# From a run with 10000 observations, we get:
# This results in the following fitted coefficients
# True Fitted
# loading.0->y1 1.00 1.15
# loading.0->y2 1.00 1.15
# L1.0->0 0.00 -0.01
# L2.0->0 0.00 -0.01
# L3.0->0 0.00 0.01
# L4.0->0 0.00 0.01
# L5.0->0 0.00 -0.00
# L6.0->0 0.50 0.50
# fb(0).cov.chol[1,1] 1.00 0.87
# sigma2.y1 0.00 -0.00
# sigma2.y2 0.00 0.00
#
# Note that the fitted values are essentially exactly right, once we
# account for the lack of factor identification. In particular, if we
# normalize the loadings to one, then the estimated factor error variance
# is (0.87 * 1.15)**2 = 1.0, as desired.
def gen_k_factor2(nobs=10000, k=2, idiosyncratic_ar1=False,
idiosyncratic_var=0.4, k_ar=6):
# Simulate bivariate VAR(6) for the factor
ix = pd.period_range(start='1950-01', periods=1, freq='M')
faux = pd.DataFrame([[0, 0]], index=ix,
columns=['f1', 'f2'])
mod = varmax.VARMAX(faux, order=(k_ar, 0), trend='n')
A = np.zeros((2, 2 * k_ar))
A[:, -2:] = np.array([[0.5, -0.2],
[0.1, 0.3]])
Q = np.array([[1.5, 0.2],
[0.2, 0.5]])
L = np.linalg.cholesky(Q)
params = np.r_[A.ravel(), L[np.tril_indices_from(L)]]
# Simulate the factors
factors = mod.simulate(params, nobs)
# Add in the idiosyncratic part
faux = pd.Series([0], index=ix)
mod_idio = sarimax.SARIMAX(faux, order=(1, 0, 0))
phi = [0.7, -0.2] if idiosyncratic_ar1 else [0, 0.]
tmp = factors.iloc[:, 0] + factors.iloc[:, 1]
# Monthly variables
endog_M = pd.concat([tmp.copy() for i in range(k)], axis=1)
columns = []
for i in range(k):
endog_M.iloc[:, i] = (
endog_M.iloc[:, i] +
mod_idio.simulate([phi[0], idiosyncratic_var], nobs))
columns += [f'yM{i + 1}_f2']
endog_M.columns = columns
# Monthly versions of quarterly variables
endog_Q_M = pd.concat([tmp.copy() for i in range(k)], axis=1)
columns = []
for i in range(k):
endog_Q_M.iloc[:, i] = (
endog_Q_M.iloc[:, i] +
mod_idio.simulate([phi[0], idiosyncratic_var], nobs))
columns += [f'yQ{i + 1}_f2']
endog_Q_M.columns = columns
# Create quarterly versions of quarterly variables
levels_M = 1 + endog_Q_M / 100
levels_M.iloc[0] = 100
levels_M = levels_M.cumprod()
# log_levels_M = np.log(levels_M) * 100
log_levels_Q = (np.log(levels_M).resample('Q', convention='e')
.sum().iloc[:-1] * 100)
# Compute the quarterly growth rate series
endog_Q = log_levels_Q.diff()
return endog_M, endog_Q, factors
@pytest.mark.skip(reason="Monte carlo test, very slow, kept for manual runs")
def test_k_factor2_factor_order_6(reset_randomstate):
# This tests that the model is correctly set up when the lag order of the
# factor is longer than 5 and we have two factors. This is important
# because 5 lags are always present when there is quarterly data, but we
# want to check that, for example, we haven't accidentally relied on there
# being exactly 5 lags available.
# Note: as of 2020/07/25, the FRBNY code does not seem to work for 6 lags,
# so we can't test against their code
endog_M, endog_Q, factors = gen_k_factor2()
# Add the factors in to endog_M, which will allow us to identify them,
# since endog_M and endog_Q are all the same linear combination of the
# factors
endog_M_aug = pd.concat([factors, endog_M], axis=1)
mod = dynamic_factor_mq.DynamicFactorMQ(
endog_M_aug, endog_quarterly=endog_Q,
factor_multiplicities=2, factor_orders=6,
idiosyncratic_ar1=False, standardize=False)
res = mod.fit()
# The identification for the VAR system means that it is harder to visually
# check that the estimation procedure produced good estimates.
# This is the invertible matrix that we'll use to transform the factors
# and parameter matrices into the original form
M = np.kron(np.eye(6), mod['design', :2, :2])
Mi = np.linalg.inv(M)
# Get the estimated parameter matrices
Z = mod['design', :, :12]
A = mod['transition', :12, :12]
R = mod['selection', :12, :2]
Q = mod['state_cov', :2, :2]
RQR = R @ Q @ R.T
# Create the transformed matrices
Z2 = Z @ Mi
A2 = M @ A @ Mi
Q2 = (M @ RQR @ M.T)
# In this example, both endog_M and endog_Q are equal to the factors,
# so we expect the loading matrix to look like the following, which can be
# confirmed (up to some numerical precision) by printing Z2:
# [ I 0 0 0 0 ]
# [ I 2I 3I 2I I ]
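# The 1-2-3-2-1 pattern in the quarterly rows is the triangular weighting that
# relates a quarterly growth rate to the current and four lagged monthly factor
# values (the same weights used in the commented-out alternative computation of
# the quarterly levels near the top of this file).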
print(Z2.round(2))
desired = np.array([
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 2, 2, 3, 3, 2, 2, 1, 1, 0, 0],
[1, 1, 2, 2, 3, 3, 2, 2, 1, 1, 0, 0]])
assert_allclose(Z2, desired, atol=0.1)
# Confirm that this is approximately:
# [ 0 0 0 0 0 0 0 0 0 0 0.5 -0.2 ]
# [ 0 0 0 0 0 0 0 0 0 0 0.1 0.3 ]
print(A2.round(2))
desired = np.array([
[0, 0, 0.02, 0, 0.01, -0.03, 0.01, 0.02, 0, -0.01, 0.5, -0.2],
[0, 0, 0, 0.02, 0, -0.01, 0, 0, 0, 0.01, 0.1, 0.3],
[1., 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1., 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1., 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1., 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1., 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1., 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1., 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1., 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1., 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1., 0, 0]])
assert_allclose(A2, desired, atol=1e-2)
# Confirm that this is approximately:
# [ 1.5 0.2 ]
# [ 0.2 0.5 ]
# in the top left corner, and then zeros elsewhere
print(Q2.round(2))
desired = np.array([[1.49, 0.21],
[0.21, 0.49]])
assert_allclose(Q2[:2, :2], desired, atol=1e-2)
assert_allclose(Q2[:2, 2:], 0, atol=1e-2)
assert_allclose(Q2[2:, :2], 0, atol=1e-2)
assert_allclose(Q2[2:, 2:], 0, atol=1e-2)
# Finally, check that after the transformation, the factors are equal to
# endog_M
a = res.states.smoothed
a2 = (M @ a.T.iloc[:12]).T
assert_allclose(endog_M.values, a2.iloc[:, :2].values, atol=1e-10)
@pytest.mark.skip(reason="Monte carlo test, very slow, kept for manual runs")
def test_two_blocks_factor_orders_6(reset_randomstate):
# This tests that the model is correctly set up when the lag order of the
# factor is longer than 5 and we have two blocks of factors, one block with
# a single factor and one block with two factors.
# For the results below, we use nobs=1000, since nobs=10000 takes a very
# long time and a large amount of memory. As a result, the results below
# are noisier than they could be, although they still provide pretty good
# evidence that the model is performing as it should
nobs = 1000
idiosyncratic_ar1 = True
k1 = 3
k2 = 10
endog1_M, endog1_Q, f1 = gen_k_factor1(
nobs, k=k1, idiosyncratic_ar1=idiosyncratic_ar1)
endog2_M, endog2_Q, f2 = gen_k_factor2(
nobs, k=k2, idiosyncratic_ar1=idiosyncratic_ar1)
endog_M = | pd.concat([endog1_M, f2, endog2_M], axis=1) | pandas.concat |
'''
Plots for my first-year (and beyond) figurative violence in the media project.
Author: <NAME> <<EMAIL>>
Date: April 01, 2017
'''
import matplotlib.pyplot as plt
import matplotlib.dates as pltdates
import pandas as pd
import seaborn as sns
from datetime import date, datetime, timedelta
from .analysis import relative_likelihood
from projects.common.analysis import (
daily_frequency, daily_metaphor_counts
)
CUR_PAL = sns.color_palette()
# for 8.5x11 paper
DEFAULT_FIGSIZE = (7.5, 5)
def by_network_frequency_figure(
frequency_df,
date_range=pd.date_range(
'2016-09-01', '2016-11-30', freq='D'
),
iatv_corpus_name=None,
freq=True,
partition_infos=None,
font_scale=1.15,
save_path=None):
sns.axes_style("darkgrid")
sns.set(font_scale=font_scale)
CUR_PAL = sns.color_palette()
df = frequency_df
# fits are not being shown for this condition
if (partition_infos is None):
if freq:
network_freq = daily_frequency(
df, date_range, iatv_corpus_name, by=['network']
)
network_freq.plot(style='o')
else:
full_df = daily_metaphor_counts(
df, ['network'], date_range
)[['MSNBCW', 'CNNW', 'FOXNEWSW']]
full_df.plot(style='o')
# Show fits. TODO: include more arguments so that fits don't have to be
# generated just to plot; generate fits outside and pass them in.
else:
if freq:
# put networks in desired order, left to right
networks = ['MSNBCW', 'CNNW', 'FOXNEWSW']
network_freq = daily_frequency(
df, date_range, iatv_corpus_name, by=['network']
)
ax = network_freq[networks].plot(
style='o', ms=14, alpha=0.5, legend=False,
figsize=DEFAULT_FIGSIZE
)
for net_idx, network in enumerate(networks):
pinfo = partition_infos[network]
day_td = timedelta(seconds=60)
d0 = date_range[0]
d1 = pinfo.partition_date_1 - day_td
d2 = pinfo.partition_date_1
d3 = pinfo.partition_date_2
d4 = pinfo.partition_date_2 + day_td
d5 = date_range[-1]
fg = pinfo.f_ground
fe = pinfo.f_excited
dates = | pd.DatetimeIndex([d0, d1, d2, d3, d4, d5]) | pandas.DatetimeIndex |
# -*- coding: utf-8 -*-
"""
These test the public routines exposed in types/common.py related to
inference that are not otherwise tested in types/test_common.py
"""
from warnings import catch_warnings, simplefilter
import collections
import re
from datetime import datetime, date, timedelta, time
from decimal import Decimal
from numbers import Number
from fractions import Fraction
import numpy as np
import pytz
import pytest
import pandas as pd
from pandas._libs import lib, iNaT, missing as libmissing
from pandas import (Series, Index, DataFrame, Timedelta,
DatetimeIndex, TimedeltaIndex, Timestamp,
Panel, Period, Categorical, isna, Interval,
DateOffset)
from pandas import compat
from pandas.compat import u, PY2, StringIO, lrange
from pandas.core.dtypes import inference
from pandas.core.dtypes.common import (
is_timedelta64_dtype,
is_timedelta64_ns_dtype,
is_datetime64_dtype,
is_datetime64_ns_dtype,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_number,
is_integer,
is_float,
is_bool,
is_scalar,
is_scipy_sparse,
ensure_int32,
ensure_categorical)
from pandas.util import testing as tm
import pandas.util._test_decorators as td
@pytest.fixture(params=[True, False], ids=str)
def coerce(request):
return request.param
# collect all objects to be tested for list-like-ness; use tuples of objects,
# whether they are list-like or not (special casing for sets), and their ID
ll_params = [
([1], True, 'list'), # noqa: E241
([], True, 'list-empty'), # noqa: E241
((1, ), True, 'tuple'), # noqa: E241
(tuple(), True, 'tuple-empty'), # noqa: E241
({'a': 1}, True, 'dict'), # noqa: E241
(dict(), True, 'dict-empty'), # noqa: E241
({'a', 1}, 'set', 'set'), # noqa: E241
(set(), 'set', 'set-empty'), # noqa: E241
(frozenset({'a', 1}), 'set', 'frozenset'), # noqa: E241
(frozenset(), 'set', 'frozenset-empty'), # noqa: E241
(iter([1, 2]), True, 'iterator'), # noqa: E241
(iter([]), True, 'iterator-empty'), # noqa: E241
((x for x in [1, 2]), True, 'generator'), # noqa: E241
((x for x in []), True, 'generator-empty'), # noqa: E241
(Series([1]), True, 'Series'), # noqa: E241
(Series([]), True, 'Series-empty'), # noqa: E241
(Series(['a']).str, True, 'StringMethods'), # noqa: E241
(Series([], dtype='O').str, True, 'StringMethods-empty'), # noqa: E241
(Index([1]), True, 'Index'), # noqa: E241
(Index([]), True, 'Index-empty'), # noqa: E241
(DataFrame([[1]]), True, 'DataFrame'), # noqa: E241
(DataFrame(), True, 'DataFrame-empty'), # noqa: E241
(np.ndarray((2,) * 1), True, 'ndarray-1d'), # noqa: E241
(np.array([]), True, 'ndarray-1d-empty'), # noqa: E241
(np.ndarray((2,) * 2), True, 'ndarray-2d'), # noqa: E241
(np.array([[]]), True, 'ndarray-2d-empty'), # noqa: E241
(np.ndarray((2,) * 3), True, 'ndarray-3d'), # noqa: E241
(np.array([[[]]]), True, 'ndarray-3d-empty'), # noqa: E241
(np.ndarray((2,) * 4), True, 'ndarray-4d'), # noqa: E241
(np.array([[[[]]]]), True, 'ndarray-4d-empty'), # noqa: E241
(np.array(2), False, 'ndarray-0d'), # noqa: E241
(1, False, 'int'), # noqa: E241
(b'123', False, 'bytes'), # noqa: E241
(b'', False, 'bytes-empty'), # noqa: E241
('123', False, 'string'), # noqa: E241
('', False, 'string-empty'), # noqa: E241
(str, False, 'string-type'), # noqa: E241
(object(), False, 'object'), # noqa: E241
(np.nan, False, 'NaN'), # noqa: E241
(None, False, 'None') # noqa: E241
]
objs, expected, ids = zip(*ll_params)
@pytest.fixture(params=zip(objs, expected), ids=ids)
def maybe_list_like(request):
return request.param
def test_is_list_like(maybe_list_like):
obj, expected = maybe_list_like
expected = True if expected == 'set' else expected
assert inference.is_list_like(obj) == expected
def test_is_list_like_disallow_sets(maybe_list_like):
obj, expected = maybe_list_like
expected = False if expected == 'set' else expected
assert inference.is_list_like(obj, allow_sets=False) == expected
def test_is_sequence():
is_seq = inference.is_sequence
assert (is_seq((1, 2)))
assert (is_seq([1, 2]))
assert (not is_seq("abcd"))
assert (not is_seq(u("abcd")))
assert (not is_seq(np.int64))
class A(object):
def __getitem__(self):
return 1
assert (not is_seq(A()))
def test_is_array_like():
assert inference.is_array_like(Series([]))
assert inference.is_array_like(Series([1, 2]))
assert inference.is_array_like(np.array(["a", "b"]))
assert inference.is_array_like(Index(["2016-01-01"]))
class DtypeList(list):
dtype = "special"
assert inference.is_array_like(DtypeList())
assert not inference.is_array_like([1, 2, 3])
assert not inference.is_array_like(tuple())
assert not inference.is_array_like("foo")
assert not inference.is_array_like(123)
@pytest.mark.parametrize('inner', [
[], [1], (1, ), (1, 2), {'a': 1}, {1, 'a'}, Series([1]),
Series([]), Series(['a']).str, (x for x in range(5))
])
@pytest.mark.parametrize('outer', [
list, Series, np.array, tuple
])
def test_is_nested_list_like_passes(inner, outer):
result = outer([inner for _ in range(5)])
assert inference.is_list_like(result)
@pytest.mark.parametrize('obj', [
'abc', [], [1], (1,), ['a'], 'a', {'a'},
[1, 2, 3], Series([1]), DataFrame({"A": [1]}),
([1, 2] for _ in range(5)),
])
def test_is_nested_list_like_fails(obj):
assert not inference.is_nested_list_like(obj)
@pytest.mark.parametrize(
"ll", [{}, {'A': 1}, Series([1])])
def test_is_dict_like_passes(ll):
assert inference.is_dict_like(ll)
@pytest.mark.parametrize(
"ll", ['1', 1, [1, 2], (1, 2), range(2), Index([1])])
def test_is_dict_like_fails(ll):
assert not inference.is_dict_like(ll)
@pytest.mark.parametrize("has_keys", [True, False])
@pytest.mark.parametrize("has_getitem", [True, False])
@pytest.mark.parametrize("has_contains", [True, False])
def test_is_dict_like_duck_type(has_keys, has_getitem, has_contains):
class DictLike(object):
def __init__(self, d):
self.d = d
if has_keys:
def keys(self):
return self.d.keys()
if has_getitem:
def __getitem__(self, key):
return self.d.__getitem__(key)
if has_contains:
def __contains__(self, key):
return self.d.__contains__(key)
d = DictLike({1: 2})
result = inference.is_dict_like(d)
expected = has_keys and has_getitem and has_contains
assert result is expected
def test_is_file_like(mock):
class MockFile(object):
pass
is_file = inference.is_file_like
data = StringIO("data")
assert is_file(data)
# No read / write attributes
# No iterator attributes
m = MockFile()
assert not is_file(m)
MockFile.write = lambda self: 0
# Write attribute but not an iterator
m = MockFile()
assert not is_file(m)
# gh-16530: Valid iterator just means we have the
# __iter__ attribute for our purposes.
MockFile.__iter__ = lambda self: self
# Valid write-only file
m = MockFile()
assert is_file(m)
del MockFile.write
MockFile.read = lambda self: 0
# Valid read-only file
m = MockFile()
assert is_file(m)
# Iterator but no read / write attributes
data = [1, 2, 3]
assert not is_file(data)
assert not is_file(mock.Mock())
@pytest.mark.parametrize(
"ll", [collections.namedtuple('Test', list('abc'))(1, 2, 3)])
def test_is_names_tuple_passes(ll):
assert inference.is_named_tuple(ll)
@pytest.mark.parametrize(
"ll", [(1, 2, 3), 'a', Series({'pi': 3.14})])
def test_is_names_tuple_fails(ll):
assert not inference.is_named_tuple(ll)
def test_is_hashable():
# all new-style classes are hashable by default
class HashableClass(object):
pass
class UnhashableClass1(object):
__hash__ = None
class UnhashableClass2(object):
def __hash__(self):
raise TypeError("Not hashable")
hashable = (1,
3.14,
np.float64(3.14),
'a',
tuple(),
(1, ),
HashableClass(), )
not_hashable = ([], UnhashableClass1(), )
abc_hashable_not_really_hashable = (([], ), UnhashableClass2(), )
for i in hashable:
assert inference.is_hashable(i)
for i in not_hashable:
assert not inference.is_hashable(i)
for i in abc_hashable_not_really_hashable:
assert not inference.is_hashable(i)
# numpy.array is no longer collections.Hashable as of
# https://github.com/numpy/numpy/pull/5326, just test
# is_hashable()
assert not inference.is_hashable(np.array([]))
# old-style classes in Python 2 don't appear hashable to
# collections.Hashable but also seem to support hash() by default
if PY2:
class OldStyleClass():
pass
c = OldStyleClass()
assert not isinstance(c, compat.Hashable)
assert inference.is_hashable(c)
hash(c) # this will not raise
@pytest.mark.parametrize(
"ll", [re.compile('ad')])
def test_is_re_passes(ll):
assert inference.is_re(ll)
@pytest.mark.parametrize(
"ll", ['x', 2, 3, object()])
def test_is_re_fails(ll):
assert not inference.is_re(ll)
@pytest.mark.parametrize(
"ll", [r'a', u('x'),
r'asdf',
re.compile('adsf'),
u(r'\u2233\s*'),
re.compile(r'')])
def test_is_recompilable_passes(ll):
assert inference.is_re_compilable(ll)
@pytest.mark.parametrize(
"ll", [1, [], object()])
def test_is_recompilable_fails(ll):
assert not inference.is_re_compilable(ll)
class TestInference(object):
def test_infer_dtype_bytes(self):
compare = 'string' if PY2 else 'bytes'
# string array of bytes
arr = np.array(list('abc'), dtype='S1')
assert lib.infer_dtype(arr) == compare
# object array of bytes
arr = arr.astype(object)
assert lib.infer_dtype(arr) == compare
# object array of bytes with missing values
assert lib.infer_dtype([b'a', np.nan, b'c'], skipna=True) == compare
def test_isinf_scalar(self):
# GH 11352
assert libmissing.isposinf_scalar(float('inf'))
assert libmissing.isposinf_scalar(np.inf)
assert not libmissing.isposinf_scalar(-np.inf)
assert not libmissing.isposinf_scalar(1)
assert not libmissing.isposinf_scalar('a')
assert libmissing.isneginf_scalar(float('-inf'))
assert libmissing.isneginf_scalar(-np.inf)
assert not libmissing.isneginf_scalar(np.inf)
assert not libmissing.isneginf_scalar(1)
assert not libmissing.isneginf_scalar('a')
def test_maybe_convert_numeric_infinities(self):
# see gh-13274
infinities = ['inf', 'inF', 'iNf', 'Inf',
'iNF', 'InF', 'INf', 'INF']
na_values = {'', 'NULL', 'nan'}
pos = np.array(['inf'], dtype=np.float64)
neg = np.array(['-inf'], dtype=np.float64)
msg = "Unable to parse string"
for infinity in infinities:
for maybe_int in (True, False):
out = lib.maybe_convert_numeric(
np.array([infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
out = lib.maybe_convert_numeric(
np.array(['-' + infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, neg)
out = lib.maybe_convert_numeric(
np.array([u(infinity)], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
out = lib.maybe_convert_numeric(
np.array(['+' + infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
# too many characters
with pytest.raises(ValueError, match=msg):
lib.maybe_convert_numeric(
np.array(['foo_' + infinity], dtype=object),
na_values, maybe_int)
def test_maybe_convert_numeric_post_floatify_nan(self, coerce):
# see gh-13314
data = np.array(['1.200', '-999.000', '4.500'], dtype=object)
expected = np.array([1.2, np.nan, 4.5], dtype=np.float64)
nan_values = {-999, -999.0}
out = lib.maybe_convert_numeric(data, nan_values, coerce)
tm.assert_numpy_array_equal(out, expected)
def test_convert_infs(self):
arr = np.array(['inf', 'inf', 'inf'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False)
assert result.dtype == np.float64
arr = np.array(['-inf', '-inf', '-inf'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False)
assert result.dtype == np.float64
def test_scientific_no_exponent(self):
# See PR 12215
arr = np.array(['42E', '2E', '99e', '6e'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False, True)
assert np.all(np.isnan(result))
def test_convert_non_hashable(self):
# GH13324
# make sure that we are handing non-hashables
arr = np.array([[10.0, 2], 1.0, 'apple'])
result = lib.maybe_convert_numeric(arr, set(), False, True)
tm.assert_numpy_array_equal(result, np.array([np.nan, 1.0, np.nan]))
def test_convert_numeric_uint64(self):
arr = np.array([2**63], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
arr = np.array([str(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
arr = np.array([np.uint64(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
@pytest.mark.parametrize("arr", [
np.array([2**63, np.nan], dtype=object),
np.array([str(2**63), np.nan], dtype=object),
np.array([np.nan, 2**63], dtype=object),
np.array([np.nan, str(2**63)], dtype=object)])
def test_convert_numeric_uint64_nan(self, coerce, arr):
expected = arr.astype(float) if coerce else arr.copy()
result = lib.maybe_convert_numeric(arr, set(),
coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
def test_convert_numeric_uint64_nan_values(self, coerce):
arr = np.array([2**63, 2**63 + 1], dtype=object)
na_values = {2**63}
expected = (np.array([np.nan, 2**63 + 1], dtype=float)
if coerce else arr.copy())
result = lib.maybe_convert_numeric(arr, na_values,
coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize("case", [
np.array([2**63, -1], dtype=object),
np.array([str(2**63), -1], dtype=object),
np.array([str(2**63), str(-1)], dtype=object),
np.array([-1, 2**63], dtype=object),
np.array([-1, str(2**63)], dtype=object),
np.array([str(-1), str(2**63)], dtype=object)])
def test_convert_numeric_int64_uint64(self, case, coerce):
expected = case.astype(float) if coerce else case.copy()
result = lib.maybe_convert_numeric(case, set(), coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize("value", [-2**63 - 1, 2**64])
def test_convert_int_overflow(self, value):
# see gh-18584
arr = np.array([value], dtype=object)
result = lib.maybe_convert_objects(arr)
tm.assert_numpy_array_equal(arr, result)
def test_maybe_convert_objects_uint64(self):
# see gh-4471
arr = np.array([2**63], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
# NumPy bug: can't compare uint64 to int64, as that
# results in both casting to float64, so we should
# make sure that this function is robust against it
arr = np.array([np.uint64(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
arr = np.array([2, -1], dtype=object)
exp = np.array([2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
arr = np.array([2**63, -1], dtype=object)
exp = np.array([2**63, -1], dtype=object)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
def test_mixed_dtypes_remain_object_array(self):
# GH14956
array = np.array([datetime(2015, 1, 1, tzinfo=pytz.utc), 1],
dtype=object)
result = lib.maybe_convert_objects(array, convert_datetime=1)
tm.assert_numpy_array_equal(result, array)
class TestTypeInference(object):
# Dummy class used for testing with Python objects
class Dummy():
pass
def test_inferred_dtype_fixture(self, any_skipna_inferred_dtype):
# see pandas/conftest.py
inferred_dtype, values = any_skipna_inferred_dtype
# make sure the inferred dtype of the fixture is as requested
assert inferred_dtype == lib.infer_dtype(values, skipna=True)
def test_length_zero(self):
result = lib.infer_dtype(np.array([], dtype='i4'))
assert result == 'integer'
result = lib.infer_dtype([])
assert result == 'empty'
# GH 18004
arr = np.array([np.array([], dtype=object),
np.array([], dtype=object)])
result = lib.infer_dtype(arr)
assert result == 'empty'
def test_integers(self):
arr = np.array([1, 2, 3, np.int64(4), np.int32(5)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'integer'
arr = np.array([1, 2, 3, np.int64(4), np.int32(5), 'foo'], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed-integer'
arr = np.array([1, 2, 3, 4, 5], dtype='i4')
result = lib.infer_dtype(arr)
assert result == 'integer'
def test_bools(self):
arr = np.array([True, False, True, True, True], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'boolean'
arr = np.array([np.bool_(True), np.bool_(False)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'boolean'
arr = np.array([True, False, True, 'foo'], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed'
arr = np.array([True, False, True], dtype=bool)
result = lib.infer_dtype(arr)
assert result == 'boolean'
arr = np.array([True, np.nan, False], dtype='O')
result = lib.infer_dtype(arr, skipna=True)
assert result == 'boolean'
def test_floats(self):
arr = np.array([1., 2., 3., np.float64(4), np.float32(5)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'floating'
arr = np.array([1, 2, 3, np.float64(4), np.float32(5), 'foo'],
dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed-integer'
arr = np.array([1, 2, 3, 4, 5], dtype='f4')
result = lib.infer_dtype(arr)
assert result == 'floating'
arr = np.array([1, 2, 3, 4, 5], dtype='f8')
result = lib.infer_dtype(arr)
assert result == 'floating'
def test_decimals(self):
# GH15690
arr = np.array([Decimal(1), Decimal(2), Decimal(3)])
result = lib.infer_dtype(arr)
assert result == 'decimal'
arr = np.array([1.0, 2.0, Decimal(3)])
result = lib.infer_dtype(arr)
assert result == 'mixed'
arr = np.array([Decimal(1), Decimal('NaN'), Decimal(3)])
result = lib.infer_dtype(arr)
assert result == 'decimal'
arr = np.array([Decimal(1), np.nan, Decimal(3)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'decimal'
def test_string(self):
pass
def test_unicode(self):
arr = [u'a', np.nan, u'c']
result = lib.infer_dtype(arr)
assert result == 'mixed'
arr = [u'a', np.nan, u'c']
result = lib.infer_dtype(arr, skipna=True)
expected = 'unicode' if PY2 else 'string'
assert result == expected
@pytest.mark.parametrize('dtype, missing, skipna, expected', [
(float, np.nan, False, 'floating'),
(float, np.nan, True, 'floating'),
(object, np.nan, False, 'floating'),
(object, np.nan, True, 'empty'),
(object, None, False, 'mixed'),
(object, None, True, 'empty')
])
@pytest.mark.parametrize('box', [pd.Series, np.array])
def test_object_empty(self, box, missing, dtype, skipna, expected):
# GH 23421
arr = box([missing, missing], dtype=dtype)
result = lib.infer_dtype(arr, skipna=skipna)
assert result == expected
def test_datetime(self):
dates = [datetime(2012, 1, x) for x in range(1, 20)]
index = Index(dates)
assert index.inferred_type == 'datetime64'
def test_infer_dtype_datetime(self):
arr = np.array([Timestamp('2011-01-01'),
Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([np.datetime64('2011-01-01'),
np.datetime64('2011-01-01')], dtype=object)
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([datetime(2011, 1, 1), datetime(2012, 2, 1)])
assert lib.infer_dtype(arr) == 'datetime'
# starts with nan
for n in [pd.NaT, np.nan]:
arr = np.array([n, pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([n, np.datetime64('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([n, datetime(2011, 1, 1)])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([n, pd.Timestamp('2011-01-02'), n])
assert | lib.infer_dtype(arr) | pandas._libs.lib.infer_dtype |
import builtins
from io import StringIO
from itertools import product
from string import ascii_lowercase
import numpy as np
import pytest
from pandas.errors import UnsupportedFunctionCall
import pandas as pd
from pandas import (
DataFrame, Index, MultiIndex, Series, Timestamp, date_range, isna)
import pandas.core.nanops as nanops
from pandas.util import testing as tm
@pytest.mark.parametrize("agg_func", ['any', 'all'])
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize("vals", [
['foo', 'bar', 'baz'], ['foo', '', ''], ['', '', ''],
[1, 2, 3], [1, 0, 0], [0, 0, 0],
[1., 2., 3.], [1., 0., 0.], [0., 0., 0.],
[True, True, True], [True, False, False], [False, False, False],
[np.nan, np.nan, np.nan]
])
def test_groupby_bool_aggs(agg_func, skipna, vals):
df = DataFrame({'key': ['a'] * 3 + ['b'] * 3, 'val': vals * 2})
# Figure out expectation using Python builtin
exp = getattr(builtins, agg_func)(vals)
# edge case for missing data with skipna and 'any'
if skipna and all(isna(vals)) and agg_func == 'any':
exp = False
exp_df = DataFrame([exp] * 2, columns=['val'], index=Index(
['a', 'b'], name='key'))
result = getattr(df.groupby('key'), agg_func)(skipna=skipna)
tm.assert_frame_equal(result, exp_df)
def test_max_min_non_numeric():
# #2700
aa = DataFrame({'nn': [11, 11, 22, 22],
'ii': [1, 2, 3, 4],
'ss': 4 * ['mama']})
result = aa.groupby('nn').max()
assert 'ss' in result
result = aa.groupby('nn').max(numeric_only=False)
assert 'ss' in result
result = aa.groupby('nn').min()
assert 'ss' in result
result = aa.groupby('nn').min(numeric_only=False)
assert 'ss' in result
def test_intercept_builtin_sum():
s = Series([1., 2., np.nan, 3.])
grouped = s.groupby([0, 1, 2, 2])
result = grouped.agg(builtins.sum)
result2 = grouped.apply(builtins.sum)
expected = grouped.sum()
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
# @pytest.mark.parametrize("f", [max, min, sum])
# def test_builtins_apply(f):
@pytest.mark.parametrize("f", [max, min, sum])
@pytest.mark.parametrize('keys', [
"jim", # Single key
["jim", "joe"] # Multi-key
])
def test_builtins_apply(keys, f):
# see gh-8155
df = pd.DataFrame(np.random.randint(1, 50, (1000, 2)),
columns=["jim", "joe"])
df["jolie"] = np.random.randn(1000)
fname = f.__name__
result = df.groupby(keys).apply(f)
ngroups = len(df.drop_duplicates(subset=keys))
assert_msg = ("invalid frame shape: {} "
"(expected ({}, 3))".format(result.shape, ngroups))
assert result.shape == (ngroups, 3), assert_msg
tm.assert_frame_equal(result, # numpy's equivalent function
df.groupby(keys).apply(getattr(np, fname)))
if f != sum:
expected = df.groupby(keys).agg(fname).reset_index()
expected.set_index(keys, inplace=True, drop=False)
tm.assert_frame_equal(result, expected, check_dtype=False)
tm.assert_series_equal(getattr(result, fname)(),
getattr(df, fname)())
def test_arg_passthru():
# make sure that we are passing thru kwargs
# to our agg functions
# GH3668
# GH5724
df = pd.DataFrame(
{'group': [1, 1, 2],
'int': [1, 2, 3],
'float': [4., 5., 6.],
'string': list('abc'),
'category_string': pd.Series(list('abc')).astype('category'),
'category_int': [7, 8, 9],
'datetime': pd.date_range('20130101', periods=3),
'datetimetz': pd.date_range('20130101',
periods=3,
tz='US/Eastern'),
'timedelta': pd.timedelta_range('1 s', periods=3, freq='s')},
columns=['group', 'int', 'float', 'string',
'category_string', 'category_int',
'datetime', 'datetimetz',
'timedelta'])
expected_columns_numeric = Index(['int', 'float', 'category_int'])
# mean / median
expected = pd.DataFrame(
{'category_int': [7.5, 9],
'float': [4.5, 6.],
'timedelta': [pd.Timedelta('1.5s'),
pd.Timedelta('3s')],
'int': [1.5, 3],
'datetime': [pd.Timestamp('2013-01-01 12:00:00'),
pd.Timestamp('2013-01-03 00:00:00')],
'datetimetz': [
pd.Timestamp('2013-01-01 12:00:00', tz='US/Eastern'),
pd.Timestamp('2013-01-03 00:00:00', tz='US/Eastern')]},
index=Index([1, 2], name='group'),
columns=['int', 'float', 'category_int',
'datetime', 'datetimetz', 'timedelta'])
for attr in ['mean', 'median']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = f(numeric_only=False)
tm.assert_frame_equal(result.reindex_like(expected), expected)
# TODO: min, max *should* handle
# categorical (ordered) dtype
expected_columns = Index(['int', 'float', 'string',
'category_int',
'datetime', 'datetimetz',
'timedelta'])
for attr in ['min', 'max']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(['int', 'float', 'string',
'category_string', 'category_int',
'datetime', 'datetimetz',
'timedelta'])
for attr in ['first', 'last']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(['int', 'float', 'string',
'category_int', 'timedelta'])
for attr in ['sum']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(['int', 'float', 'category_int'])
for attr in ['prod', 'cumprod']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
# like min, max, but don't include strings
expected_columns = Index(['int', 'float',
'category_int',
'datetime', 'datetimetz',
'timedelta'])
for attr in ['cummin', 'cummax']:
f = getattr(df.groupby('group'), attr)
result = f()
# GH 15561: numeric_only=False set by default like min/max
tm.assert_index_equal(result.columns, expected_columns)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(['int', 'float', 'category_int',
'timedelta'])
for attr in ['cumsum']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
def test_non_cython_api():
# GH5610
# non-cython calls should not include the grouper
df = DataFrame(
[[1, 2, 'foo'],
[1, np.nan, 'bar'],
[3, np.nan, 'baz']],
columns=['A', 'B', 'C'])
g = df.groupby('A')
gni = df.groupby('A', as_index=False)
# mad
expected = DataFrame([[0], [np.nan]], columns=['B'], index=[1, 3])
expected.index.name = 'A'
result = g.mad()
tm.assert_frame_equal(result, expected)
expected = DataFrame([[0., 0.], [0, np.nan]], columns=['A', 'B'],
index=[0, 1])
result = gni.mad()
tm.assert_frame_equal(result, expected)
# describe
expected_index = pd.Index([1, 3], name='A')
expected_col = pd.MultiIndex(levels=[['B'],
['count', 'mean', 'std', 'min',
'25%', '50%', '75%', 'max']],
codes=[[0] * 8, list(range(8))])
expected = pd.DataFrame([[1.0, 2.0, np.nan, 2.0, 2.0, 2.0, 2.0, 2.0],
[0.0, np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan]],
index=expected_index,
columns=expected_col)
result = g.describe()
tm.assert_frame_equal(result, expected)
expected = pd.concat([df[df.A == 1].describe().unstack().to_frame().T,
df[df.A == 3].describe().unstack().to_frame().T])
expected.index = pd.Index([0, 1])
result = gni.describe()
tm.assert_frame_equal(result, expected)
# any
expected = DataFrame([[True, True], [False, True]], columns=['B', 'C'],
index=[1, 3])
expected.index.name = 'A'
result = g.any()
tm.assert_frame_equal(result, expected)
# idxmax
expected = DataFrame([[0.0], [np.nan]], columns=['B'], index=[1, 3])
expected.index.name = 'A'
result = g.idxmax()
tm.assert_frame_equal(result, expected)
def test_cython_api2():
# this takes the fast apply path
# cumsum (GH5614)
df = DataFrame(
[[1, 2, np.nan], [1, np.nan, 9], [3, 4, 9]
], columns=['A', 'B', 'C'])
expected = DataFrame(
[[2, np.nan], [np.nan, 9], [4, 9]], columns=['B', 'C'])
result = df.groupby('A').cumsum()
tm.assert_frame_equal(result, expected)
# GH 5755 - cumsum is a transformer and should ignore as_index
result = df.groupby('A', as_index=False).cumsum()
tm.assert_frame_equal(result, expected)
# GH 13994
result = df.groupby('A').cumsum(axis=1)
expected = df.cumsum(axis=1)
tm.assert_frame_equal(result, expected)
result = df.groupby('A').cumprod(axis=1)
expected = df.cumprod(axis=1)
tm.assert_frame_equal(result, expected)
def test_cython_median():
df = DataFrame(np.random.randn(1000))
df.values[::2] = np.nan
labels = np.random.randint(0, 50, size=1000).astype(float)
labels[::17] = np.nan
result = df.groupby(labels).median()
exp = df.groupby(labels).agg(nanops.nanmedian)
tm.assert_frame_equal(result, exp)
df = DataFrame(np.random.randn(1000, 5))
rs = df.groupby(labels).agg(np.median)
xp = df.groupby(labels).median()
tm.assert_frame_equal(rs, xp)
def test_median_empty_bins(observed):
df = pd.DataFrame(np.random.randint(0, 44, 500))
grps = range(0, 55, 5)
bins = pd.cut(df[0], grps)
result = df.groupby(bins, observed=observed).median()
expected = df.groupby(bins, observed=observed).agg(lambda x: x.median())
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("dtype", [
'int8', 'int16', 'int32', 'int64', 'float32', 'float64'])
@pytest.mark.parametrize("method,data", [
('first', {'df': [{'a': 1, 'b': 1}, {'a': 2, 'b': 3}]}),
('last', {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 4}]}),
('min', {'df': [{'a': 1, 'b': 1}, {'a': 2, 'b': 3}]}),
('max', {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 4}]}),
('nth', {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 4}],
'args': [1]}),
('count', {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 2}],
'out_type': 'int64'})
])
def test_groupby_non_arithmetic_agg_types(dtype, method, data):
# GH9311, GH6620
df = pd.DataFrame(
[{'a': 1, 'b': 1},
{'a': 1, 'b': 2},
{'a': 2, 'b': 3},
{'a': 2, 'b': 4}])
df['b'] = df.b.astype(dtype)
if 'args' not in data:
data['args'] = []
if 'out_type' in data:
out_type = data['out_type']
else:
out_type = dtype
exp = data['df']
df_out = pd.DataFrame(exp)
df_out['b'] = df_out.b.astype(out_type)
df_out.set_index('a', inplace=True)
grpd = df.groupby('a')
t = getattr(grpd, method)(*data['args'])
tm.assert_frame_equal(t, df_out)
@pytest.mark.parametrize("i", [
(Timestamp("2011-01-15 12:50:28.502376"),
Timestamp("2011-01-20 12:50:28.593448")),
(24650000000000001, 24650000000000002)
])
def test_groupby_non_arithmetic_agg_int_like_precision(i):
# see gh-6620, gh-9311
    df = pd.DataFrame([{"a": 1, "b": i[0]}, {"a": 1, "b": i[1]}])
# -*- coding: utf-8 -*-
"""
@file:utils.py
@time:2019/6/1 21:57
@author:Tangj
@software:Pycharm
@Desc
"""
import os
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.metrics import roc_auc_score
from time import time
import random
import pandas as pd
def frame_to_dict(train):
    train_dict = {}
    for col in train.columns:
        train_dict[col] = train[col].values
    return train_dict
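# Usage sketch (hypothetical columns, not from the original pipeline); shows the
# column -> numpy array mapping that frame_to_dict is meant to build:
#   demo = pd.DataFrame({'ad_id': [1, 2], 'bid': [10, 20]})
#   frame_to_dict(demo)  # -> {'ad_id': array([1, 2]), 'bid': array([10, 20])}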
def del_adSize(ad_Size):
ad_size_mean = []
ad_size_max = []
ad_size_min = []
for adSize in ad_Size:
if not isinstance(adSize, str):
# print(adSize)
ad_size_mean.append(adSize)
ad_size_max.append(adSize)
ad_size_min.append(adSize)
continue
size = adSize.split(',')
s = []
for i in size:
s.append(int(i))
ad_size_mean.append(np.mean(s))
ad_size_max.append(np.max(s))
ad_size_min.append(np.min(s))
    return ad_size_mean, ad_size_max, ad_size_min
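# Example (hypothetical ad-size strings): del_adSize(['10,20', '30']) is expected to
# return ([15.0, 30.0], [20, 30], [10, 30]) -- per-ad mean, max and min of the sizes.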
def write_data_into_parts(data, root_path, nums=5100000):
    l = data.shape[0] // nums
    for i in range(l + 1):
        begin = i * nums
        end = min(nums * (i + 1), data.shape[0])
        t_data = data[begin:end]
        # write each chunk to its own file so earlier parts are not overwritten
        t_data.tofile(root_path + '_part' + str(i) + '.bin')
def write_dict(path, data):
fw = open(path, 'w')
for key in data:
fw.write(str(key) + ',' + str(data[key]) + '\n')
fw.close()
def read_allfea(path):
f = open(path, 'r')
fea = '0'
for i in f:
fea = i
fea_val = fea.split(',')
index_dict = {}
for i, fea in enumerate(fea_val):
index_dict[fea] = i + 1
if '-1' not in index_dict:
index_dict['-1'] = len(fea_val)
return fea, index_dict
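# Usage sketch (assumed file layout): the feature file holds one comma-separated line
# of feature values, e.g. "a,b,c"; read_allfea returns that raw line plus a
# value -> index dict built from it (indices start at 1, with a '-1' fallback key).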
def one_hot_feature_concat(train, test, fea1, fea2, filter_num=100):
train1 = train[fea1].values
train2 = train[fea2].values
test1 = test[fea1].values
test2 = test[fea2].values
train_data = []
test_data = []
train_res = []
test_res = []
for i, values in enumerate(train1):
new = str(values) + '|' + str(train2[i])
train_data.append(new)
for i, values in enumerate(test1):
new = str(values) + '|' + str(test2[i])
# print(new)
test_data.append(new)
count_dict = {}
for d in train_data:
if d not in count_dict:
count_dict[d] = 0
count_dict[d] += 1
filter_set = []
for i in count_dict:
        if count_dict[i] < filter_num:
filter_set.append(i)
index_dict = {}
begin_index = 1
for d in train_data:
        # assign an index to each value that appears in the training data
if d in filter_set:
d = '-1'
if d not in index_dict:
index_dict[d] = begin_index
begin_index += 1
train_res.append(index_dict[d])
if '-1' not in index_dict:
index_dict['-1'] = begin_index
for d in test_data:
if d not in index_dict or d in filter_set:
d = '-1'
test_res.append(index_dict[d])
print(test_res)
return np.array(train_res), np.array(test_res)
def one_hot_feature_process(train_data, val_data, test2_data, begin_num, filter_num=0):
index_dict = {}
begin_index = begin_num
train_res = []
for d in train_data:
# print(d)
        # assign an index to each value that appears in the training data
if d not in index_dict:
index_dict[d] = begin_index
begin_index += 1
# print(index_dict[d])
train_res.append(index_dict[d])
if '-1' not in index_dict:
index_dict['-1'] = begin_index
val_res = []
for d in val_data:
if d not in index_dict:
index_dict[d] = begin_index
begin_index += 1
val_res.append(index_dict[d])
test2_res = []
for d in test2_data:
if d not in index_dict:
d = '-1'
test2_res.append(index_dict[d])
# print(np.array(train_res))
return np.array(train_res), np.array(val_res), np.array(test2_res), index_dict
def vector_feature_process(train_data, val_data, test2_data, begin_num, max_len, index_dict):
train_res = []
train_res2 = []
val_res2 = []
test2_res2 = []
train_rate = []
val_rate = []
test2_rate = []
for d in train_data:
lx = d.split(',')
row = [0] * max_len
row2 = [0] * max_len
if len(lx) > max_len or d == 'all':
j = 0
for i in index_dict:
if j >= max_len:
break
row[j] = index_dict[i]
j += 1
train_res.append(row)
row2 = [1] * max_len
train_res2.append(row2)
train_rate.append(1)
continue
for i, x in enumerate(lx):
if x not in index_dict:
x = '-1'
row[i] = index_dict[x]
row2[row[i]] = 1
train_res.append(row)
train_res2.append(row2)
train_rate.append(len(lx) / max_len)
val_res = []
for d in val_data:
lx = d.split(',')
row = [0] * max_len
row2 = [0] * max_len
if len(lx) > max_len or d == 'all':
j = 0
for i in index_dict:
if j >= max_len:
break
row[j] = index_dict[i]
j += 1
val_res.append(row)
row2 = [1] * max_len
val_res2.append(row2)
val_rate.append(1)
continue
for i, x in enumerate(lx):
if x not in index_dict:
x = '-1'
row[i] = index_dict[x]
row2[row[i]] = 1
val_res.append(row)
val_res2.append(row2)
val_rate.append(len(lx) / max_len)
test2_res = []
for d in test2_data:
lx = d.split(',')
row = [0] * max_len
row2 = [0] * max_len
if len(lx) > max_len or d == 'all':
j = 0
for i in index_dict:
if j >= max_len:
break
row[j] = index_dict[i]
j += 1
test2_res.append(row)
row2 = [1] * max_len
test2_res2.append(row2)
test2_rate.append(1)
continue
for i, x in enumerate(lx):
if x not in index_dict:
x = '-1'
row[i] = index_dict[x]
row2[row[i]] = 1
test2_res.append(row)
test2_res2.append(row2)
test2_rate.append(len(lx) / max_len)
return np.array(train_res), np.array(val_res), np.array(test2_res), index_dict, np.array(train_res2), np.array(
val_res2), np.array(test2_res2), np.array(train_rate), np.array(val_rate), np.array(test2_rate),
def count_one_feature_times(train, test, fea):
count_dict = {}
test_res = []
train_res = []
for val in train[fea].values:
if val not in count_dict:
count_dict[val] = 0
count_dict[val] += 1
if '-1' not in count_dict:
count_dict['-1'] = 1
for i in train[fea].values:
train_res.append(count_dict[i])
for i in test:
if i not in count_dict:
i = '-1'
test_res.append(count_dict[i])
return np.array(train_res), np.array(test_res)
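# Example (hypothetical column): with train['uid'] = ['u1', 'u1', 'u2'] and
# test = ['u1', 'u3'], the result is (array([2, 2, 1]), array([2, 1])) -- per-row
# frequency counts, with unseen test values falling back to the '-1' count of 1.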
def count_vector_feature_times(train, val_data, test, fea):
count_dict = {}
val_res = []
test_res = []
train_res = []
Train = pd.concat([train, val_data])
for val in Train[fea].values:
vals = val.split(',')
for i in vals:
if i not in count_dict:
count_dict[i] = 0
count_dict[i] += 1
if '-1' not in count_dict:
count_dict['-1'] = 1
for val in train[fea].values:
vals = val.split(',')
l = []
for i in vals:
l.append(count_dict[i])
# ['max', 'mean', 'min', 'median']
max_l = np.max(l)
mean_l = np.mean(l)
min_l = np.min(l)
median_l = np.median(l)
train_res.append([max_l, mean_l, min_l, median_l])
for val in val_data[fea].values:
vals = val.split(',')
l = []
for i in vals:
l.append(count_dict[i])
# ['max', 'mean', 'min', 'median']
max_l = np.max(l)
mean_l = np.mean(l)
min_l = np.min(l)
median_l = np.median(l)
val_res.append([max_l, mean_l, min_l, median_l])
for val in test:
vals = val.split(',')
l = []
for i in vals:
if i not in count_dict:
i = '-1'
l.append(count_dict[i])
# ['max', 'mean', 'min', 'median']
max_l = np.max(l)
mean_l = np.mean(l)
min_l = np.min(l)
median_l = np.median(l)
test_res.append([max_l, mean_l, min_l, median_l])
return np.array(train_res), np.array(val_res), np.array(test_res)
# features built from exposure, pctr, ecpm and bid
def one_feature_exposure2(Train, test, fea, date):
    # returns four exposure statistics: max, min, mean and median,
    # and four bid statistics: max, min, mean and median
test_res = []
train_res = []
id_res = []
reqday_res = []
train = Train
num1 = train[train['day'] == 20190410].shape[0]
id_res.extend(train[train['day'] == 20190410]['ad_id'].values)
reqday_res.extend(train[train['day'] == 20190410]['day'].values)
for i in range(num1):
train_res.append([0, 0, 0, 0])
for i in range(len(date) - 1):
day = int(date[i + 1])
train_compute = Train[Train['day'] == day]
train_count = Train[Train['day'] < day]
id_res.extend(train_compute['ad_id'].values)
reqday_res.extend(train_compute['day'].values)
exposure_dict = {}
for value in train_count[fea].values:
if value not in exposure_dict:
exposure_dict[value] = []
train1 = train_count[train_count[fea] == value]['sucess_rate'].values
exposure_dict[value].append(np.max(train1))
exposure_dict[value].append(np.min(train1))
exposure_dict[value].append(np.mean(train1))
exposure_dict[value].append(np.median(train1))
if '-1' not in exposure_dict:
exposure_dict['-1'] = [0, 0, 0, 0]
for value in train_compute[fea].values:
if value not in exposure_dict:
value = '-1'
train_res.append(exposure_dict[value])
train_count = Train[Train['day'] > 20190414]
exposure_dict = {}
for value in train_count[fea].values:
if value not in exposure_dict:
train1 = train_count[train_count[fea] == value]['sucess_rate'].values
exposure_dict[value] = []
exposure_dict[value].append(np.max(train1))
exposure_dict[value].append(np.min(train1))
exposure_dict[value].append(np.mean(train1))
exposure_dict[value].append(np.median(train1))
if '-1' not in exposure_dict:
exposure_dict['-1'] = [0, 0, 0, 0]
for value in test:
if value not in exposure_dict:
value = '-1'
test_res.append(exposure_dict[value])
return np.array(train_res), np.array(test_res), np.array(id_res), np.array(reqday_res)
def one_feature_exposure4(Train, test, fea, date):
test_res = []
train_res = []
id_res = []
reqday_res = []
train = Train
train_count = train[train['day'] == 20190410]
train_compute = train[train['day'] == 20190410]
id_res.extend(train_compute['ad_id'].values)
reqday_res.extend(train_compute['day'].values)
exposure_dict = {}
for value in train_count[fea].values:
if value not in exposure_dict:
train1 = train_count[train_count[fea] == value]['ex'].values
exposure_dict[value] = []
exposure_dict[value].append(np.mean(train1))
exposure_dict[value].append(np.median(train1))
if '-1' not in exposure_dict:
exposure_dict['-1'] = [0.9, 0.9]
for value in train_compute[fea].values:
if value not in exposure_dict:
value = '-1'
train_res.append(exposure_dict[value])
train_count = train[train['day'] == 20190410]
train_compute = train[train['day'] == 20190411]
id_res.extend(train_compute['ad_id'].values)
reqday_res.extend(train_compute['day'].values)
exposure_dict = {}
for value in train_count[fea].values:
if value not in exposure_dict:
train1 = train_count[train_count[fea] == value]['ex'].values
exposure_dict[value] = []
exposure_dict[value].append(np.mean(train1))
exposure_dict[value].append(np.median(train1))
if '-1' not in exposure_dict:
exposure_dict['-1'] = [0.9, 0.9]
for value in train_compute[fea].values:
if value not in exposure_dict:
value = '-1'
train_res.append(exposure_dict[value])
for i in range(len(date) - 2):
day1 = int(date[i + 2])
day2 = int(date[i + 1])
day3 = int(date[i])
train1 = Train[Train['day'] == day3]
train2 = Train[Train['day'] == day2]
train_compute = Train[Train['day'] == day1]
id_res.extend(train_compute['ad_id'].values)
reqday_res.extend(train_compute['day'].values)
train_count = pd.concat([train1, train2])
exposure_dict = {}
for value in train_count[fea].values:
if value not in exposure_dict:
exposure_dict[value] = []
train1 = train_count[train_count[fea] == value]['ex'].values
exposure_dict[value].append(np.mean(train1))
exposure_dict[value].append(np.median(train1))
if '-1' not in exposure_dict:
exposure_dict['-1'] = [0.9, 0.9]
for value in train_compute[fea].values:
if value not in exposure_dict:
value = '-1'
train_res.append(exposure_dict[value])
train1 = train[train['day'] == 20190421]
train2 = train[train['day'] == 20190422]
train_count = pd.concat([train1, train2])
exposure_dict = {}
for value in train_count[fea].values:
if value not in exposure_dict:
# print(train_count[train_count[fea] == value].shape[0])
train1 = train_count[train_count[fea] == value]['ex'].values
exposure_dict[value] = []
exposure_dict[value].append(np.mean(train1))
exposure_dict[value].append(np.median(train1))
if '-1' not in exposure_dict:
exposure_dict['-1'] = [0.9, 0.9]
num_dis = 0
for value in test:
# print(value)
if value not in exposure_dict:
num_dis += 1
value = '-1'
test_res.append(exposure_dict[value])
print(num_dis)
return np.array(train_res), np.array(test_res), \
np.array(id_res), np.array(reqday_res)
def one_feature_exposure3(Train, test, fea, date):
    # returns four exposure statistics: max, min, mean and median,
    # and four bid statistics: max, min, mean and median
test_res = []
train_res = []
id_res = []
reqday_res = []
train = Train
# train_count = train[train['day'] == 20190410]
# train_compute = train[train['day'] == 20190410]
# id_res.extend(train_compute['ad_id'].values)
# reqday_res.extend(train_compute['day'].values)
# exposure_dict = {}
# for value in train_count[fea].values:
# if value not in exposure_dict:
# train1 = train_count[train_count[fea] == value]['sucess_rate'].values
# exposure_dict[value] = []
# exposure_dict[value].append(np.max(train1))
# exposure_dict[value].append(np.min(train1))
# exposure_dict[value].append(np.mean(train1))
# exposure_dict[value].append(np.median(train1))
#
# if '-1' not in exposure_dict:
# exposure_dict['-1'] = [0, 0, 0, 0]
#
# for value in train_compute[fea].values:
# if value not in exposure_dict:
# value = '-1'
# train_res.append(exposure_dict[value])
#
# train_count = train[train['day'] == 20190410]
# train_compute = train[train['day'] == 20190411]
# id_res.extend(train_compute['ad_id'].values)
# reqday_res.extend(train_compute['day'].values)
# exposure_dict = {}
# for value in train_count[fea].values:
# if value not in exposure_dict:
# train1 = train_count[train_count[fea] == value]['sucess_rate'].values
# exposure_dict[value] = []
# exposure_dict[value].append(np.max(train1))
# exposure_dict[value].append(np.min(train1))
# exposure_dict[value].append(np.mean(train1))
# exposure_dict[value].append(np.median(train1))
# if '-1' not in exposure_dict:
# exposure_dict['-1'] = [0, 0, 0, 0]
#
# for value in train_compute[fea].values:
# if value not in exposure_dict:
# value = '-1'
# train_res.append(exposure_dict[value])
#
# for i in range(len(date) - 2):
# day1 = int(date[i + 2])
# day2 = int(date[i + 1])
# day3 = int(date[i])
#
# train1 = Train[Train['day'] == day3]
# train2 = Train[Train['day'] == day2]
# train_compute = Train[Train['day'] == day1]
# id_res.extend(train_compute['ad_id'].values)
# reqday_res.extend(train_compute['day'].values)
# train_count = pd.concat([train1, train2])
# exposure_dict = {}
# for value in train_count[fea].values:
# if value not in exposure_dict:
# exposure_dict[value] = []
# train1 = train_count[train_count[fea] == value]['sucess_rate'].values
# exposure_dict[value].append(np.max(train1))
# exposure_dict[value].append(np.min(train1))
# exposure_dict[value].append(np.mean(train1))
# exposure_dict[value].append(np.median(train1))
#
# if '-1' not in exposure_dict:
# exposure_dict['-1'] = [0, 0, 0, 0]
# for value in train_compute[fea].values:
# if value not in exposure_dict:
# value = '-1'
# train_res.append(exposure_dict[value])
# train1 = train[train['day'] == 20190421]
train_count = train[train['day'] == 20190422]
# train_count = pd.concat([train1, train2])
exposure_dict = {}
for value in train_count[fea].values:
if value not in exposure_dict:
train1 = train_count[train_count[fea] == value]['sucess_rate'].values
exposure_dict[value] = []
exposure_dict[value].append(np.max(train1))
exposure_dict[value].append(np.min(train1))
exposure_dict[value].append(np.mean(train1))
exposure_dict[value].append(np.median(train1))
if '-1' not in exposure_dict:
exposure_dict['-1'] = [0, 0, 0, 0]
num_dis = 0
for value in test:
# print(value)
if value not in exposure_dict:
num_dis += 1
value = '-1'
test_res.append(exposure_dict[value])
print(num_dis)
return np.array(train_res), np.array(test_res), \
np.array(id_res), np.array(reqday_res)
def one_feature_exposure(train, val, test, fea, date):
    # returns four exposure statistics: max, min, mean and median,
    # and four bid statistics: max, min, mean and median
val_res = []
test_res = []
train_res = []
val_res2 = []
test_res2 = []
train_res2 = []
train_res3 = []
id_res = []
reqday_res = []
Train = pd.concat([train, val])
num1 = train[train['Reqday'] == '02_16'].shape[0]
id_res.extend(train[train['Reqday'] == '02_16']['ad_id'].values)
reqday_res.extend(train[train['Reqday'] == '02_16']['Reqday'].values)
for i in range(num1):
train_res.append([8, 8, 8, 8])
train_res2.append([8, 8, 8, 8])
train_count = train[train['Reqday'] == '02_16']
train_compute = train[train['Reqday'] == '02_17']
id_res.extend(train_compute['ad_id'].values)
reqday_res.extend(train_compute['Reqday'].values)
exposure_dict = {}
bid_dict = {}
for value in train_count[fea].values:
if value not in exposure_dict:
train1 = train_count[train_count[fea] == value]['exposure'].values
exposure_dict[value] = []
bid_dict[value] = []
exposure_dict[value].append(np.max(train1))
exposure_dict[value].append(np.min(train1))
exposure_dict[value].append(np.mean(train1))
exposure_dict[value].append(np.median(train1))
train2 = train_count[train_count[fea] == value]['adBid'].values
bid_dict[value].append(np.max(train2))
bid_dict[value].append(np.min(train2))
bid_dict[value].append(np.mean(train2))
bid_dict[value].append(np.median(train2))
if '-1' not in exposure_dict:
exposure_dict['-1'] = [8, 8, 8, 8]
bid_dict['-1'] = [8, 8, 8, 8]
for value in train_compute[fea].values:
if value not in exposure_dict:
value = '-1'
train_res.append(exposure_dict[value])
train_res2.append(bid_dict[value])
for i in range(len(date) - 2):
day1 = date[i + 2]
day2 = date[i + 1]
day3 = date[i]
train1 = Train[Train['Reqday'] == day3]
train2 = Train[Train['Reqday'] == day2]
train_compute = Train[Train['Reqday'] == day1]
id_res.extend(train_compute['ad_id'].values)
reqday_res.extend(train_compute['Reqday'].values)
        train_count = pd.concat([train1, train2])
import altair as alt
import pandas as pd
from sys import argv
import numpy as np
df = pd.read_csv(argv[1], keep_default_na=False)
# -*- coding: utf-8 -*-
# Global imports
from __future__ import unicode_literals
import os
import glob
import pickle
import argparse as ap
import pandas as pd
import numpy as np
import multiprocessing as mp
# Plot imports
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use("tkagg")
import seaborn as sns
# Local imports
from PELEParseReports import *
# Script information
__author__ = "<NAME>"
__license__ = "MIT"
__version__ = "1.0.2"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
class PELEAnalyzer():
def __init__(self):
self.reports, self.output_path, self.threshold, self.column_number, self.window_size, self.catalytic_event,\
self.to_drop, self.n_of_ester_groups, self.add_histidine, self.cysteine, self.threshold_histidine,\
self.catalytic_distance, self.verbose, self.perform_plots, self.violin_plots,\
self.num_steps, self.n_processors,self.analysis = self.parseArgs()
def parseArgs(self):
"""
Parse arguments from command-line
RETURNS
-------
reports : string
list of report files to look for data
output_path : string
output directory where the csv file will be saved
threshold: float
threshold for the desired metric
column_number: integer
index of the column the desired metric
window_size: integer
Number of steps to consider an entrance
        num_steps: integer
Number of steps per PELE epoch
n_processors: integer
Number of processors
analysis: string
Analysis to perform
"""
parser = ap.ArgumentParser(description='Script that performs different analysis of the \
metrics of the reports file from a PELE simulation.')
optional = parser._action_groups.pop()
required = parser.add_argument_group('required arguments')
required.add_argument("-i", "--input", required=True, metavar="FILE",
type=str, nargs='*', help="path to report files")
optional.add_argument("-o", "--output", metavar="PATH", type=str,
help="output path to save figure", default="PELE_results")
optional.add_argument("-TE","--threshold", metavar="THRESHOLD",type=float,
help="threshold for the desired metric", default = 4.0)
optional.add_argument("-C","--column_number", metavar="INTEGER",type=int,
help="index of the column where the desired metrics resides", default=7)
optional.add_argument("-W","--window_size", metavar="INTEGER",type=int,
help="number of steps between being out and entering the threshold to consider entrance", default = 2)
optional.add_argument("-CE","--catalytic_event", metavar="LIST",type=str,
nargs='*',help="index of the column where catalytic distances reside (they must be 3)")
optional.add_argument("-TD","--to_drop", metavar="LIST",type=str,
nargs='*',help="column names that want to be dropped", default=[])
optional.add_argument("-NE","--n_of_ester_groups", metavar="INTEGER",type=int,
help="number of ester groups in the substrate used in the PELE simulation", default = 1)
optional.add_argument("-AH","--add_histidine",
help="Add the catalytic histidine - ether O atom(s) of the substrate to the count of catalytic events",
action="store_true")
optional.add_argument("-CS","--cysteine",
help="The code takes into account that the nucleophile residue is a cysteine for the catalytic events",
action="store_true")
optional.add_argument("-TH","--threshold_histidine", metavar="THRESHOLD",type=float,
help="threshold for the distance between the N atom of the His and the ether O atom of the substrate to consider as catalytic", default = 6.5)
optional.add_argument("-CD","--catalytic_distance", metavar="THRESHOLD",type=float,
help="threshold for the hydrogen bonds of the catalytic residues", default = 3.5)
optional.add_argument("-V","--verbose",
help="Activate the verbose mode", action="store_true")
optional.add_argument("-PP","--perform_plots",
help="Saves the plots of the calculated metrics/parameters", action="store_true")
optional.add_argument("-VP","--violin_plots",
help="Perform violin plots instead of box plots", action="store_true")
optional.add_argument("-NS","--num_steps", metavar="INTEGER",type=int,
help="number of steps per report", default = 40)
optional.add_argument("-NP","--n_processors", metavar="INTEGER",type=int,
help="number of processors to execute the code", default = 4)
optional.add_argument("-A","--analysis", metavar="STRING",type=str,
help="Type of analysis to perform", default="CATE")
parser._action_groups.append(optional)
args = parser.parse_args()
reports = parseReports(args.input, parser)
if args.catalytic_event is not None:
args.catalytic_event = [int(i)-1 for i in args.catalytic_event]
return reports, args.output, args.threshold, int(args.column_number)-1, args.window_size, \
args.catalytic_event, args.to_drop, args.n_of_ester_groups, args.add_histidine, args.cysteine,\
args.threshold_histidine, args.catalytic_distance, args.verbose, args.perform_plots,\
args.violin_plots, args.num_steps, args.n_processors, args.analysis
def DecompressList(self,l_of_lists):
"""
This method decompress a
list of lists into a list
PARAMETERS
----------
l_of_lists: list of lists
List of lists
RETURNS
-------
new_list: list
New list
"""
new_list = []
for sublist in l_of_lists:
for item in sublist:
new_list.append(item)
return new_list
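    # Quick illustration (not from the original script): DecompressList([[1, 2], [3]])
    # returns [1, 2, 3], i.e. one level of flattening.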
def Calculate_total_catalytic_events(self, set_of_thresholds, report):
"""
Helper function to calculate the catalytic events out of the total PELE steps
whether are accepted or rejected
PARAMETERS
----------
set_of_thresholds: list of booleans
List of booleans
RETURNS
-------
Total_CE: The total number of catalytic events in the report file
Integer/Float
"""
Total_CE = 0
for i,row in enumerate(set_of_thresholds):
if i>0:
if row==True and i != len(set_of_thresholds)-1 and set_of_thresholds[i-1]==True:
Total_CE += (report.loc[i][1]-report.loc[i-1][1])
elif row==False and set_of_thresholds[i-1]==True:
Total_CE += (report.loc[i][1]-report.loc[i-1][1])-1
elif row==True and i == len(set_of_thresholds)-1:
Total_CE += (self.num_steps-report.loc[i][1])
else:
continue
else:
continue
return Total_CE
def Catalytic_events_and_means(self, report):
"""
        Take a PELE simulation report file and obtain, for each ester group, the
        number of accepted steps whose catalytic distances fall below the thresholds
        (catalytic events), plus whether the trajectory contains any such event
        RETURNS
        -------
        values_aux: list
            Raw report rows
        cat_events: list of integers
            Number of catalytic events (accepted steps) per ester group
        cat_trajectories: list of integers
            1 if at least one catalytic event was found for that group, else 0
        rep.shape[0]: integer
            Number of accepted PELE steps in the report
        total_catalytic_events: list of integers
            Catalytic events counted over all (accepted and rejected) PELE steps
"""
values_aux, cat_events, cat_trajectories, total_catalytic_events = [], [], [], []
rep = pd.read_csv(report,sep=" ")
rep.dropna(axis=1,inplace=True)
values_aux.append(rep.values.tolist())
for i in range(self.n_of_ester_groups):
if self.add_histidine:
if i == 0:
if self.cysteine:
set_of_thresholds = (rep[rep.columns[self.catalytic_event[0]+i]] <= self.threshold) & \
(rep[rep.columns[self.catalytic_event[0]+i+1]] <= self.threshold_histidine) & \
(rep[rep.columns[self.catalytic_event[1]]] <= self.catalytic_distance) & \
(rep[rep.columns[self.catalytic_event[2]]] <= self.catalytic_distance)
CATE = rep[set_of_thresholds]
Total_CE = self.Calculate_total_catalytic_events(set_of_thresholds, rep)
else:
set_of_thresholds = (rep[rep.columns[self.catalytic_event[0]+i]] <= self.threshold) & \
(rep[rep.columns[self.catalytic_event[0]+i+1]] <= self.threshold_histidine) & \
(rep[rep.columns[self.catalytic_event[1]]] <= self.catalytic_distance) & \
((rep[rep.columns[self.catalytic_event[2]]] <= self.catalytic_distance) \
| (rep[rep.columns[self.catalytic_event[2]+1]] <= self.catalytic_distance))
CATE = rep[set_of_thresholds]
Total_CE = self.Calculate_total_catalytic_events(set_of_thresholds, rep)
else:
if self.cysteine:
                        set_of_thresholds = (rep[rep.columns[self.catalytic_event[0]+2**i]] <= self.threshold) & \
(rep[rep.columns[self.catalytic_event[0]+2**i+1]] <= self.threshold_histidine) & \
(rep[rep.columns[self.catalytic_event[1]]] <= self.catalytic_distance) & \
(rep[rep.columns[self.catalytic_event[2]]] <= self.catalytic_distance)
CATE = rep[set_of_thresholds]
Total_CE = self.Calculate_total_catalytic_events(set_of_thresholds, rep)
else:
                        set_of_thresholds = (rep[rep.columns[self.catalytic_event[0]+2**i]] <= self.threshold) & \
(rep[rep.columns[self.catalytic_event[0]+2**i+1]] <= self.threshold_histidine) & \
(rep[rep.columns[self.catalytic_event[1]]] <= self.catalytic_distance) & \
((rep[rep.columns[self.catalytic_event[2]]] <= self.catalytic_distance) \
| (rep[rep.columns[self.catalytic_event[2]+1]] <= self.catalytic_distance))
CATE = rep[set_of_thresholds]
Total_CE = self.Calculate_total_catalytic_events(set_of_thresholds, rep)
else:
if self.cysteine:
set_of_thresholds = (rep[rep.columns[self.catalytic_event[0]+i]] <= self.threshold) & \
(rep[rep.columns[self.catalytic_event[1]]] <= self.catalytic_distance) & \
(rep[rep.columns[self.catalytic_event[2]]] <= self.catalytic_distance)
CATE = rep[set_of_thresholds]
Total_CE = self.Calculate_total_catalytic_events(set_of_thresholds, rep)
else:
set_of_thresholds = (rep[rep.columns[self.catalytic_event[0]+i]] <= self.threshold) & \
(rep[rep.columns[self.catalytic_event[1]]] <= self.catalytic_distance) & \
((rep[rep.columns[self.catalytic_event[2]]] <= self.catalytic_distance) \
| (rep[rep.columns[self.catalytic_event[2]+1]] <= self.catalytic_distance))
CATE = rep[set_of_thresholds]
Total_CE = self.Calculate_total_catalytic_events(set_of_thresholds, rep)
CE = CATE.shape[0]
cat_events.append(CE)
total_catalytic_events.append(Total_CE)
if CE!=0:
cat_trajectories.append(1)
if self.verbose:
print("{} --> {}".format(report,list(CATE["numberOfAcceptedPeleSteps"])))
else:
cat_trajectories.append(0)
return values_aux, cat_events, cat_trajectories, rep.shape[0], total_catalytic_events
def CalculateFreeEnergy(self, report):
"""
        Take a PELE simulation report file and return the binding energies of two
        differentiated states, split according to a/some metric/s.
        RETURNS
        -------
        G1, G2: lists
            Binding energies of the steps whose metric is below (G1) or above (G2)
            the threshold.
"""
G1, G2 = [],[]
rep = pd.read_csv(report,sep=" ")
rep.dropna(axis=1,inplace=True)
for i_row in range(rep.shape[0]):
if rep.loc[i_row][self.column_number]<=self.threshold:
# if rep.loc[i_row][self.column_number]<=self.threshold and rep.loc[i_row][5]<=0.2:
G1.append(rep.loc[i_row][4])
else:
G2.append(rep.loc[i_row][4])
return G1, G2
def EstimateEnantioselectivity(self, report):
"""
        Take a PELE simulation report file and keep only the steps with
        a distance (metric) smaller than a certain threshold
RETURNS
-------
Rep: pandas DataFrame
Report file with the steps that have the metric below the threshold
"""
        rep = pd.read_csv(report, sep=" ")
# -*- coding: utf-8 -*-
import datetime
import time
from sqlalchemy.sql import func
import pandas as pd
import math
data=pd.read_csv('上证50_daily.csv',index_col=0)
t=data.index
f=0
jiange=30
res=[]
import datetime
# def get_day_nday_ago(date,n):
# t = time.strptime(date, "%Y-%m-%d")
# y, m, d = t[0:3]
# Date = str(datetime.datetime(y, m, d) - datetime.timedelta(n)).split()
# return Date[0]
def get_day_nday_ago(date,n):
t = time.strptime(date, "%Y-%m-%d")
y, m, d = t[0:3]
before_n_days=[]
for i in range(1, n + 1)[::-1]:
before_n_days.append(str(datetime.datetime(y, m, d)- datetime.timedelta(days=i))[0:10])
# Date = str(datetime.datetime(y, m, d) - datetime.timedelta(n)).split()
return before_n_days
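# Illustration (not in the original script): get_day_nday_ago('2018-06-10', 3) returns
# ['2018-06-07', '2018-06-08', '2018-06-09'] -- the n calendar days strictly before the
# given date, oldest first.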
aa=get_day_nday_ago('2018-06-10',7)
set(aa)&set(list(data.index))
t=list(data.index)
time_window=30
res={}
for i in t:
aa = get_day_nday_ago(i, time_window)
data_time_window=list(set(aa) & set(list(data.index)))
tt=data.loc[data_time_window]
if len(data_time_window)<=1:
continue
res[i]=sum(tt['volume'])/len(data_time_window)
res2={}
for i in t:
data_day=data.loc[i]
res2[i]=data_day['volume']
res3={}
res4={}
mairu={}
maichu={}
for i,j in res.items():
res3[i]=res2[i]-res[i]
if res[i]>=1.2*res2[i]:
res4[i]=1
mairu[i]=1
maichu[i]=0
elif res[i]<0.8*res2[i]:
res4[i]=-1
maichu[i]=-1
mairu[i]=0
else:
res4[i]=0
mairu[i]=0
maichu[i]=0
zz=pd.DataFrame(res,index=[0])
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 15 14:26:29 2020
@author: skyhigh
"""
import pandas as pd
columns =['math','english','science']
indexs = ['John','Julia',"Mary","Henry"]
datas=[[80,70,90],[88,87,99],[77,66,76],[90,98,96]]
df = pd.DataFrame(datas,columns=columns,index=indexs)
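# A small follow-up sketch (not part of the original script): basic lookups on the frame.
print(df.loc['John'])          # one student's scores
print(df['english'].mean())    # class average for one subject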
# http://www.vdh.virginia.gov/coronavirus/
from bs4 import BeautifulSoup
import csv
from datetime import datetime
from io import StringIO
import os
import requests
import pandas as pd
# Remove empty rows
def filtered(rows):
return [x for x in rows if "".join([(x[y] or "").strip() for y in x]) != ""]
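# Example: filtered([{'a': ' ', 'b': ''}, {'a': '1', 'b': ''}]) keeps only the second
# row, since the first one is blank after stripping every value.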
def run_VA(args):
# Parameters
raw_name = '../VA/raw'
data_name = '../VA/data/data_%s.csv'
now = datetime.now()
links = [("locality", "https://data.virginia.gov/resource/bre9-aqqr.csv"),
("conf", "https://data.virginia.gov/resource/uqs3-x7zh.csv"),
("dist", "https://data.virginia.gov/resource/v5a8-4ahw.csv"),
("age", "https://data.virginia.gov/resource/uktn-mwig.csv"),
("sex", "https://data.virginia.gov/resource/tdt3-q47w.csv"),
("race_ethnicity", "https://data.virginia.gov/resource/9sba-m86n.csv")]
for link in links:
most_recent = ""
exists = os.path.exists(data_name % link[0])
out = []
# If current data file does not exist
if not exists:
version = 0
v_exists = True
while v_exists:
version += 1
v_exists = os.path.exists((data_name % (link[0] + "_V" + str(version))))
version = version - 1
v_df = pd.read_csv((data_name % (link[0] + "_V" + str(version))))
date_col = ""
for col in v_df.columns:
if "date" in col.lower() and "report" in col.lower():
date_col = col
break
# Getting most recent date
            dates = pd.to_datetime(v_df[date_col])
import copy
from GTac_Data import gtac_data
from data_gen import raw_data_byts_checkout_2, collect_DataPoints
# from data_collect_fingers_five import COLUMNS_RAW_FINGER_DATA, MAG_NUM, COL_INDEX
from gtac_config import COLUMNS_RAW_FINGER_DATA, MAG_NUM, COL_INDEX
# from Handover import collect_DataPoints, find_location, find_mat_value
# from Stably_Gentle_Grasping import find_mat_sum_sec, reactive_pinch
from draw_bubbles_py_3 import setup_scatter_ax, plot_fingertip_2
# from draw_lines2 import update_vals
import serial
import time
import pandas as pd
import numpy as np
import argparse
import matplotlib
# matplotlib.use('TkAgg')
from matplotlib import pyplot as plt
from matplotlib.animation import FuncAnimation
# from GTac_Hand import gtac_hand
window_length = 200
x = np.linspace(0, 199, 200)
y = np.zeros(len(x))
mag_x = []
mag_y = []
mag_z = []
mat_x_0 = [4, 4, 4, 4, 3, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1]
mat_y_0 = [1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4]
mat_x = [1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4]
mat_y = [1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4]
press_location_r = 2.5
press_location_r_list = []
press_location_c = 2.5
press_location_c_list = []
sum_value = 0
sum_value_list = []
mat_sz = np.zeros(16)
mat_amp_index = 10
pressing_loc_amp_index = 2
mat_loc_index = 0.001
def update_vals(data_frame_array, finger=1, sec=2, window_len=200):
tri_index = finger * 9 + (2 - sec) * 3
sum_r = 0
sum_c = 0
global mag_x, mag_y, mag_z, sum_value, press_location_r, press_location_c, \
sum_value_list, press_location_r_list, press_location_c_list
sum_value = 0
# update magnetic and resistive signals for GTac Bubbles
for i in range(len(mat_x)):
r = i // 4
c = i % 4
index, value = gtac_data.find_mat_value(data_frame_array, finger, sec, r, c)
if value > 20: # threshold to remove noise for obtaining pressing location
sum_r += (r + 1) * value
sum_c += (c + 1) * value
sum_value += value
mat_sz[i] = abs(value * mat_amp_index)
else:
mat_sz[i] = 0
mat_x[i] = c + 1 + data_frame_array[tri_index] * mat_loc_index
mat_y[i] = r + 1 + data_frame_array[tri_index + 1] * mat_loc_index
# update pressing locations
if sum_value != 0:
press_location_r = round(sum_r / sum_value, 1)
press_location_c = round(sum_c / sum_value, 1)
# update magnetic signals
mag_x.append(data_frame_array[tri_index])
mag_y.append(data_frame_array[tri_index + 1])
mag_z.append(abs(data_frame_array[tri_index + 2]))
sum_value_list.append(sum_value)
press_location_r_list.append(press_location_r - 1)
press_location_c_list.append(press_location_c - 1)
if len(mag_x) > window_len:
mag_x = mag_x[-window_len:]
mag_y = mag_y[-window_len:]
mag_z = mag_z[-window_len:]
sum_value_list = sum_value_list[-window_len:]
press_location_r_list = press_location_r_list[-window_len:]
press_location_c_list = press_location_c_list[-window_len:]
print('r:{};c:{}'.format(press_location_r, press_location_c))
# update vals for plot gaussian
# zarray = gaus2d(x=x_mesh, y=y_mesh,
# mx=press_location_r,
# my=press_location_c,
# sx=1,
# sy=1)
# define normalized 2D gaussian
def gaus2d(x=0, y=0, mx=0, my=0, sx=1, sy=1):
return 1. / (2. * np.pi * sx * sy) * np.exp(-((x - mx)**2. / (2. * sx**2.) + (y - my)**2. / (2. * sy**2.)))
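# Quick sanity check (a sketch, not part of the original script): gaus2d(0, 0) equals
# 1 / (2 * np.pi) ~= 0.159, the peak of a unit-variance 2-D Gaussian at the origin.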
def plot_pressing_loc(scat, press_location_r, press_location_c, sec_sum):
scat.set_offsets(np.array([press_location_c, press_location_r]))
scat.set_sizes([sec_sum * pressing_loc_amp_index])
def set_data_sec(f4_ax1_scat_tri_mat, f4_ax1_scat_pre_loc,
f4_ax2_magx, f4_ax2_magy, f4_ax3_magz,
f4_ax3_mat_sum, f4_ax4_center_x, f4_ax4_center_y):
plot_fingertip_2(f4_ax1_scat_tri_mat, mat_x, mat_y, mat_sz)
plot_pressing_loc(f4_ax1_scat_pre_loc,
press_location_r,
press_location_c,
sum_value)
if len(mag_y) == window_length:
# print(len(mag_x),len(mag_y),len(mag_z),len(sum_value_list),len(press_location_c_list),len(press_location_r_list))
f4_ax2_magx.set_ydata(mag_x)
f4_ax2_magy.set_ydata(mag_y)
f4_ax3_magz.set_ydata(mag_z)
f4_ax3_mat_sum.set_ydata(sum_value_list)
f4_ax4_center_x.set_ydata(press_location_c_list)
f4_ax4_center_y.set_ydata(press_location_r_list)
def setup_scatter_ax2(ax):
# rect is the box edge
rect = plt.Rectangle((-1, -1),
5,
5,
ec='none', lw=2, fc='none')
ax.add_patch(rect)
ax.axes.xaxis.set_visible(False)
ax.axes.yaxis.set_visible(False)
scat_base = ax.scatter(mat_x_0, mat_y_0, s=1500, alpha=0.4)
scat_tri_mat = ax.scatter(mat_x, mat_y, s=150, alpha=1)
scat_pre_loc = ax.scatter(press_location_c, press_location_r, s=150, alpha=1)
return scat_tri_mat, scat_pre_loc
def setup_figures():
# prepare the figure
fig4 = plt.figure(figsize=(9, 5), constrained_layout=True)
gs = fig4.add_gridspec(3, 5)
f4_ax1 = fig4.add_subplot(gs[:, :-2],
aspect='equal',
autoscale_on=False,
xlim=(0, 5), ylim=(0, 5))
f4_ax1.set_title('GTac Bubbles')
f4_ax1_scat_tri_mat, f4_ax1_scat_pre_loc = setup_scatter_ax2(f4_ax1)
f4_ax2 = fig4.add_subplot(gs[0, -2:])
f4_ax2.set_title('Shear Force Signals (uT)')
f4_ax2.set_ylim([-500, 500])
f4_ax2_magx = f4_ax2.plot(np.zeros(window_length), label='SA-II x')[0]
f4_ax2_magy = f4_ax2.plot(np.zeros(window_length), label='SA-II y')[0]
# f4_ax3_magz = f4_ax2.plot(np.zeros(window_length), label='mag-z')[0]
f4_ax2.legend(loc=0)
f4_ax3 = fig4.add_subplot(gs[1, -2:])
f4_ax3.set_title('Normal Force Signals')
f4_ax3.set_ylim([0, 2000])
f4_ax3_mat_sum = f4_ax3.plot(np.zeros(window_length),
label='FA-I Sum')[0]
f4_ax3_magz = f4_ax3.plot(np.zeros(window_length), label='SA-II z')[0]
# f4_ax3_mag_z = f4_ax3.plot(np.zeros(window_length),
# label='mag-z')[0]
f4_ax3.legend(loc=0)
f4_ax4 = fig4.add_subplot(gs[2, -2:])
f4_ax4.set_title('Normal Force Center')
f4_ax4.set_ylim([0, 4])
f4_ax4_center_x = f4_ax4.plot(np.zeros(window_length), label='x')[0]
f4_ax4_center_y = f4_ax4.plot(np.zeros(window_length), label='y')[0]
f4_ax4.legend()
# fig1 = plt.figure()
# f1_ax1_gaussian = fig1.add_subplot(111, projection='3d')
# f1_ax1_gaussian.set_title('GTac Super-Resotion')
# f1_ax1_gaussian_plot = [f1_ax1_gaussian.plot_surface(x_mesh, y_mesh, zarray[:, :], color='0.75', rstride=1, cstride=1)]
return fig4, f4_ax1_scat_tri_mat, f4_ax1_scat_pre_loc, \
f4_ax2_magx, f4_ax2_magy, f4_ax3_magz, \
f4_ax3_mat_sum, f4_ax4_center_x, f4_ax4_center_y
def animate2(i):
print(i)
global TO_MOVE, TO_RELEASE, pinch, time_thumb_fle, last_time_12, last_time_12_inv
start_in = time.time()
data = raw_data_byts_checkout_2(ser, verbose=False)
ms = int(round((time.time() - start) * 1000))
# data.append(ms)
data = np.append(data, ms)
# data = gtac_data.preprocess_(data)
# dt_list.append(data)
data_frame_array = data - avg # average by the initial data
to_return = []
for f_ind, f in enumerate(finger_to_plot):
for s_ind, s in enumerate(sec_to_plot):
update_vals(data_frame_array, finger=f, sec=s)
ind = f_ind * len(sec_to_plot) + s_ind
# print(ind)
set_data_sec(ax1_scat_tri_mat_list[ind],
ax1_scat_pre_loc_list[ind],
ax2_magx_list[ind],
ax2_magy_list[ind],
ax2_magz_list[ind],
ax3_mat_sum_list[ind],
ax4_center_x_list[ind],
ax4_center_y_list[ind])
to_return.append(ax1_scat_tri_mat_list[ind])
to_return.append(ax1_scat_pre_loc_list[ind])
to_return.append(ax2_magx_list[ind])
to_return.append(ax2_magy_list[ind])
to_return.append(ax2_magz_list[ind])
to_return.append(ax3_mat_sum_list[ind])
to_return.append(ax4_center_x_list[ind])
to_return.append(ax4_center_y_list[ind])
# control the fingers to grasp
# pinch,time_thumb_fle,last_time_12,last_time_12_inv = reactive_pinch(data_frame_array,ser,
# pinch,time_thumb_fle,last_time_12,last_time_12_inv)
# mat_sum_sec = find_mat_sum_sec(data_frame_array,
# mat_th=50,
# verbose=False)
# if mat_sum_sec[2, 0] > 50 and not pinch:
# pinch = True
# # creat current time stamp
# time_ctrl = time.time()
# if pinch and mat_sum_sec[0, 2] < 300 and time_ctrl - time_thumb_fle > 0.05:
# ser.write(b'<21>')
# time_thumb_fle = time_ctrl
#
# if pinch and mat_sum_sec[1, 0] < 600 and time_ctrl - last_time_12 > 0.1:
# ser.write(b'<41>')
# ser.write(b'<1-1>')
# # ser.write(b'<31>')
# # ser.write(b'<51>')
# # ser.write(b'<61>')
# last_time_12 = time_ctrl
#
# if pinch and mat_sum_sec[1, 0] > 800 and time_ctrl - last_time_12_inv > 0.1:
# ser.write(b'<4-1>')
# ser.write(b'<2-1>')
# ser.write(b'<11>')
# # ser.write(b'<3-1>')
# # ser.write(b'<5-1>')
# # ser.write(b'<6-1>')
# last_time_12_inv = time_ctrl
# if time.time() - start > 5 and TO_MOVE:
# ser.write(b'<220>')
# ser.write(b'<450>')
# TO_MOVE = False
# TO_RELEASE = True
# if time.time() - start > 15 and TO_RELEASE:
# ser.write(b'<>')
# TO_RELEASE = False
print('frames {}, time {}ms'.format(i, round((time.time() - start_in) * 1000)))
return to_return
if __name__ == '__main__':
# current time
# sudo chmod 666 /dev/ttyACM0
timestr = time.strftime("%Y%m%d_%H%M%S")
# parse the argumments
parser = argparse.ArgumentParser()
parser.add_argument("-sp", "--serialport", default='/dev/ttyACM1',
help="set serial port (default: COM6)") # ubuntu: /dev/ttyACM0
parser.add_argument("-f", "--finger", default=0, type=int,
help="set the finger to visualize")
parser.add_argument("-s", "--section", default=0, type=int,
help="set the section to visualize")
# Read arguments from command line
args = parser.parse_args()
SerialPort, finger, sec = args.serialport, \
args.finger, \
args.section
    # create a pandas DataFrame to store the data
    df_RAW = pd.DataFrame(columns=COLUMNS_RAW_FINGER_DATA)
import pandas as pd
import json, os
from datetime import timedelta
f = open('data/game_def.json')
game_def = json.load(f)
def getHardwareLog(start_date, end_date, hwlogpath):
df = pd.read_csv(hwlogpath, header=[1])
df['Time'] = pd.to_datetime(df['Time']).dt.tz_localize("America/New_York")
mask = (df['Time'] >= start_date) & (df['Time'] <= end_date)
segment = df.loc[mask]
return segment
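# Usage sketch (hypothetical path and dates): slice one play session out of a log.
#   seg = getHardwareLog(pd.Timestamp('2021-05-01 12:00', tz='America/New_York'),
#                        pd.Timestamp('2021-05-01 12:30', tz='America/New_York'),
#                        'data/raw/hwlog.csv')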
def getGame(target):
for game in game_def:
if game['name'] == target: return game
def compute(trial, hwlogpath, index, platform):
if not os.path.exists(f'data/processed/hw-study/{platform}/'):
os.mkdir(f'data/processed/hw-study/{platform}/')
trialStart = trial['init_time']
idxLoad = getGame(trial['game_name'])['times_load']
loadStart = trialStart + pd.Timedelta(seconds=trial['times'][idxLoad[1]])
loadEnd = trialStart + pd.Timedelta(seconds=trial['times'][idxLoad[0]])
hw_df = getHardwareLog(loadStart, loadEnd, hwlogpath)
hw_df.to_csv(f'data/processed/hw-study/{platform}/{trial["game_name"]}-game-{trial["iteration"]}.csv')
idxLevel = getGame(trial['game_name'])['times_level']
    levelStart = trialStart + pd.Timedelta(seconds=trial['times'][idxLevel[1]])
import os
import speedtest
import time
import sys
import shutil
import pandas as pd
from pythonping import ping
import argparse
import logging
import traceback
import configparser
from PyQt5.QtCore import pyqtSignal, QObject
from modules.visuals import InteractivePlots
class Communicate(QObject):
GUI_signal = pyqtSignal(str)
# define logger
main_logger = logging.getLogger("main_logger")
main_logger.setLevel("WARNING")
class NetworkTest:
def __init__(self, defaults, callbackFunc):
self.interval = defaults["interval"]
self.ping_target = defaults["ping_target"]
self.threads = defaults["threads"]
self.path = defaults["path"]
self.config_path = os.path.join(self.path, "modules", "config_a.ini")
self.ping_file_name = defaults["ping_file_name"]
self.speed_test_file_name = defaults["speed_test_file_name"]
self.clear = defaults["clear"]
self.ping_file_path = os.path.join(self.path, "Data", self.ping_file_name)
self.speed_test_file_path = os.path.join(self.path, "Data", self.speed_test_file_name)
self.doPingTest = True
self.doSpeedTest = True
self.runningTest = False
self.readConfig()
self.visuals = InteractivePlots(self.path, self.ping_file_path, self.speed_test_file_path)
self.src = Communicate()
self.src.GUI_signal.connect(callbackFunc)
def readConfig(self):
config = configparser.ConfigParser()
try:
config.read(self.config_path)
# Get values from configuration file
self.save_every = float(config['DEFAULT']['save_every'])
self.ping_max_threshold = float(config['DEFAULT']['ping_max_threshold'])
except:
            # In case no config-file is found or another reading error occurred
print("Configuration file not found/readable.")
sys.exit(0)
def updateTestVariables(self, updatedVariables):
print(updatedVariables)
self.doPingTest = updatedVariables["doPingTest"]
self.doSpeedTest = updatedVariables["doSpeedTest"]
self.interval = updatedVariables["interval"]
self.ping_target = updatedVariables["ping_target"]
self.threads = updatedVariables["threads"]
self.path = updatedVariables["path"]
self.ping_file_name = updatedVariables["ping_file_name"]
self.speed_test_file_name = updatedVariables["speed_test_file_name"]
self.clear = updatedVariables["clear"]
self.ping_file_path = os.path.join(self.path, "Data", self.ping_file_name)
self.speed_test_file_path = os.path.join(self.path, "Data", self.speed_test_file_name)
    # check if old files need to be moved into a new dir.
    # If the archive dir doesn't exist - create a new one
def archiveFiles(self):
if os.path.isdir(os.path.join(self.path, "Data", "archive")) is False:
# create archive folder
os.mkdir(os.path.join(self.path, "Data", "archive"))
date = time.strftime("_%Y_%m_%d_%H_%M_%S", time.localtime())
new_ping_name = "".join(
(self.ping_file_name.split(".")[0], date, ".", self.ping_file_name.split(".")[1]))
new_speed_test_name = "".join(
(self.speed_test_file_name.split(".")[0], date, ".", self.speed_test_file_name.split(".")[1]))
shutil.move(os.path.join(self.path, "Data", self.ping_file_name),
os.path.join(self.path, "Data", "archive", new_ping_name))
shutil.move(os.path.join(self.path, "Data", self.speed_test_file_name),
os.path.join(self.path, "Data", "archive", new_speed_test_name))
# check if old dataframe exists else create new one
def createDataFrames(self):
# check if a ping_test file already exists
if(self.doPingTest):
if self.ping_file_name not in os.listdir(os.path.join(self.path, "Data")):
                self.df_my_ping = pd.DataFrame(columns=["date", "min", "max", "avg", "url"])
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.9.1
# kernelspec:
# display_name: udl
# language: python
# name: udl
# ---
# %% [markdown]
# # Sentiment analysis
#
# ### Some plotting functions
# %%
# Importing packages
from matplotlib import pyplot as plt
import numpy as np
import random
# %%
# Some functions to plot our points and draw the lines
def plot_points(features, labels):
X = np.array(features)
y = np.array(labels)
spam = X[np.argwhere(y==1)]
ham = X[np.argwhere(y==0)]
plt.scatter([s[0][0] for s in spam],
[s[0][1] for s in spam],
s = 25,
color = 'cyan',
edgecolor = 'k',
marker = '^')
plt.scatter([s[0][0] for s in ham],
[s[0][1] for s in ham],
s = 25,
color = 'red',
edgecolor = 'k',
marker = 's')
plt.xlabel('aack')
plt.ylabel('beep')
plt.legend(['happy','sad'])
def draw_line(a,b,c, color='black', linewidth=2.0, linestyle='solid', starting=0, ending=3):
# Plotting the line ax + by + c = 0
x = np.linspace(starting, ending, 1000)
plt.plot(x, -c/b - a*x/b, linestyle=linestyle, color=color, linewidth=linewidth)
# %%
import pandas as pd
X = pd.DataFrame([[1,0],[0,2],[1,1],[1,2],[1,3],[2,2],[3,2],[2,3]])
y = pd.Series([0,0,0,0,1,1,1,1])
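# %%
# A minimal usage sketch (not from the original notebook): plot the eight points and a
# hand-picked separating line x + y - 3.5 = 0.
plot_points(X.values, y.values)
draw_line(1, 1, -3.5)
plt.show()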
from numpy import *
import pandas as pd
pd.set_option('precision',2)
import numpy as np
import pandas as pd
from datetime import datetime, timedelta
import pytest
import vectorbt as vbt
from vectorbt.utils.config import merge_dicts
seed = 42
# ############# base.py ############# #
class MyData(vbt.Data):
@classmethod
def download_symbol(cls, symbol, shape=(5, 3), start_date=datetime(2020, 1, 1), columns=None, index_mask=None,
column_mask=None, return_arr=False, tz_localize=None, seed=seed):
np.random.seed(seed)
a = np.random.uniform(size=shape) + symbol
if return_arr:
return a
index = [start_date + timedelta(days=i) for i in range(a.shape[0])]
if a.ndim == 1:
sr = pd.Series(a, index=index, name=columns)
if index_mask is not None:
sr = sr.loc[index_mask]
if tz_localize is not None:
sr = sr.tz_localize(tz_localize)
return sr
df = pd.DataFrame(a, index=index, columns=columns)
if index_mask is not None:
df = df.loc[index_mask]
if column_mask is not None:
df = df.loc[:, column_mask]
if tz_localize is not None:
df = df.tz_localize(tz_localize)
return df
def update_symbol(self, symbol, n=1, **kwargs):
download_kwargs = self.select_symbol_kwargs(symbol, self.download_kwargs)
download_kwargs['start_date'] = self.data[symbol].index[-1]
shape = download_kwargs.pop('shape', (5, 3))
new_shape = (n, shape[1]) if len(shape) > 1 else (n,)
new_seed = download_kwargs.pop('seed', seed) + 1
kwargs = merge_dicts(download_kwargs, kwargs)
return self.download_symbol(symbol, shape=new_shape, seed=new_seed, **kwargs)
class TestData:
def test_config(self, tmp_path):
data = MyData.download([0, 1], shape=(5, 3), columns=['feat0', 'feat1', 'feat2'])
assert MyData.loads(data.dumps()) == data
data.save(tmp_path / 'data')
assert MyData.load(tmp_path / 'data') == data
def test_download(self):
pd.testing.assert_series_equal(
MyData.download(0, shape=(5,), return_arr=True).data[0],
pd.Series(
[
0.3745401188473625,
0.9507143064099162,
0.7319939418114051,
0.5986584841970366,
0.15601864044243652
]
)
)
pd.testing.assert_frame_equal(
MyData.download(0, shape=(5, 3), return_arr=True).data[0],
pd.DataFrame(
[
[0.3745401188473625, 0.9507143064099162, 0.7319939418114051],
[0.5986584841970366, 0.15601864044243652, 0.15599452033620265],
[0.05808361216819946, 0.8661761457749352, 0.6011150117432088],
[0.7080725777960455, 0.020584494295802447, 0.9699098521619943],
[0.8324426408004217, 0.21233911067827616, 0.18182496720710062]
]
)
)
index = pd.DatetimeIndex(
['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'],
dtype='datetime64[ns]',
freq='D'
)
pd.testing.assert_series_equal(
MyData.download(0, shape=(5,)).data[0],
pd.Series(
[
0.3745401188473625,
0.9507143064099162,
0.7319939418114051,
0.5986584841970366,
0.15601864044243652
],
index=index
)
)
pd.testing.assert_series_equal(
MyData.download(0, shape=(5,), columns='feat0').data[0],
pd.Series(
[
0.3745401188473625,
0.9507143064099162,
0.7319939418114051,
0.5986584841970366,
0.15601864044243652
],
index=index,
name='feat0'
)
)
pd.testing.assert_frame_equal(
MyData.download(0, shape=(5, 3)).data[0],
pd.DataFrame(
[
[0.3745401188473625, 0.9507143064099162, 0.7319939418114051],
[0.5986584841970366, 0.15601864044243652, 0.15599452033620265],
[0.05808361216819946, 0.8661761457749352, 0.6011150117432088],
[0.7080725777960455, 0.020584494295802447, 0.9699098521619943],
[0.8324426408004217, 0.21233911067827616, 0.18182496720710062]
],
index=index
)
)
pd.testing.assert_frame_equal(
MyData.download(0, shape=(5, 3), columns=['feat0', 'feat1', 'feat2']).data[0],
pd.DataFrame(
[
[0.3745401188473625, 0.9507143064099162, 0.7319939418114051],
[0.5986584841970366, 0.15601864044243652, 0.15599452033620265],
[0.05808361216819946, 0.8661761457749352, 0.6011150117432088],
[0.7080725777960455, 0.020584494295802447, 0.9699098521619943],
[0.8324426408004217, 0.21233911067827616, 0.18182496720710062]
],
index=index,
columns=pd.Index(['feat0', 'feat1', 'feat2'], dtype='object'))
)
pd.testing.assert_series_equal(
MyData.download([0, 1], shape=(5,)).data[0],
pd.Series(
[
0.3745401188473625,
0.9507143064099162,
0.7319939418114051,
0.5986584841970366,
0.15601864044243652
],
index=index
)
)
pd.testing.assert_series_equal(
MyData.download([0, 1], shape=(5,)).data[1],
pd.Series(
[
1.3745401188473625,
1.9507143064099162,
1.7319939418114051,
1.5986584841970366,
1.15601864044243652
],
index=index
)
)
pd.testing.assert_frame_equal(
MyData.download([0, 1], shape=(5, 3)).data[0],
pd.DataFrame(
[
[0.3745401188473625, 0.9507143064099162, 0.7319939418114051],
[0.5986584841970366, 0.15601864044243652, 0.15599452033620265],
[0.05808361216819946, 0.8661761457749352, 0.6011150117432088],
[0.7080725777960455, 0.020584494295802447, 0.9699098521619943],
[0.8324426408004217, 0.21233911067827616, 0.18182496720710062]
],
index=index
)
)
pd.testing.assert_frame_equal(
MyData.download([0, 1], shape=(5, 3)).data[1],
pd.DataFrame(
[
[1.3745401188473625, 1.9507143064099162, 1.7319939418114051],
[1.5986584841970366, 1.15601864044243652, 1.15599452033620265],
[1.05808361216819946, 1.8661761457749352, 1.6011150117432088],
[1.7080725777960455, 1.020584494295802447, 1.9699098521619943],
[1.8324426408004217, 1.21233911067827616, 1.18182496720710062]
],
index=index
)
)
tzaware_index = pd.DatetimeIndex(
[
'2020-01-01 01:00:00',
'2020-01-02 01:00:00',
'2020-01-03 01:00:00',
'2020-01-04 01:00:00',
'2020-01-05 01:00:00'
],
dtype='datetime64[ns, Europe/Berlin]',
freq='D'
)
pd.testing.assert_series_equal(
MyData.download(0, shape=(5,), tz_localize='UTC', tz_convert='Europe/Berlin').data[0],
pd.Series(
[
0.3745401188473625,
0.9507143064099162,
0.7319939418114051,
0.5986584841970366,
0.15601864044243652
],
index=tzaware_index
)
)
index_mask = vbt.symbol_dict({
0: [False, True, True, True, True],
1: [True, True, True, True, False]
})
pd.testing.assert_series_equal(
MyData.download([0, 1], shape=(5,), index_mask=index_mask, missing_index='nan').data[0],
pd.Series(
[
np.nan,
0.9507143064099162,
0.7319939418114051,
0.5986584841970366,
0.15601864044243652
],
index=index
)
)
pd.testing.assert_series_equal(
MyData.download([0, 1], shape=(5,), index_mask=index_mask, missing_index='nan').data[1],
pd.Series(
[
1.3745401188473625,
1.9507143064099162,
1.7319939418114051,
1.5986584841970366,
np.nan
],
index=index
)
)
pd.testing.assert_series_equal(
MyData.download([0, 1], shape=(5,), index_mask=index_mask, missing_index='drop').data[0],
pd.Series(
[
0.9507143064099162,
0.7319939418114051,
0.5986584841970366
],
index=index[1:4]
)
)
pd.testing.assert_series_equal(
MyData.download([0, 1], shape=(5,), index_mask=index_mask, missing_index='drop').data[1],
pd.Series(
[
1.9507143064099162,
1.7319939418114051,
1.5986584841970366
],
index=index[1:4]
)
)
column_mask = vbt.symbol_dict({
0: [False, True, True],
1: [True, True, False]
})
pd.testing.assert_frame_equal(
MyData.download([0, 1], shape=(5, 3), index_mask=index_mask, column_mask=column_mask,
missing_index='nan', missing_columns='nan').data[0],
pd.DataFrame(
[
[np.nan, np.nan, np.nan],
[np.nan, 0.15601864044243652, 0.15599452033620265],
[np.nan, 0.8661761457749352, 0.6011150117432088],
[np.nan, 0.020584494295802447, 0.9699098521619943],
[np.nan, 0.21233911067827616, 0.18182496720710062]
],
index=index
)
)
pd.testing.assert_frame_equal(
MyData.download([0, 1], shape=(5, 3), index_mask=index_mask, column_mask=column_mask,
missing_index='nan', missing_columns='nan').data[1],
pd.DataFrame(
[
[1.3745401188473625, 1.9507143064099162, np.nan],
[1.5986584841970366, 1.15601864044243652, np.nan],
[1.05808361216819946, 1.8661761457749352, np.nan],
[1.7080725777960455, 1.020584494295802447, np.nan],
[np.nan, np.nan, np.nan]
],
index=index
)
)
pd.testing.assert_frame_equal(
MyData.download([0, 1], shape=(5, 3), index_mask=index_mask, column_mask=column_mask,
missing_index='drop', missing_columns='drop').data[0],
pd.DataFrame(
[
[0.15601864044243652],
[0.8661761457749352],
[0.020584494295802447]
],
index=index[1:4],
columns=pd.Int64Index([1], dtype='int64')
)
)
pd.testing.assert_frame_equal(
MyData.download([0, 1], shape=(5, 3), index_mask=index_mask, column_mask=column_mask,
missing_index='drop', missing_columns='drop').data[1],
pd.DataFrame(
[
[1.15601864044243652],
[1.8661761457749352],
[1.020584494295802447]
],
index=index[1:4],
columns=pd.Int64Index([1], dtype='int64')
)
)
with pytest.raises(Exception) as e_info:
MyData.download([0, 1], shape=(5, 3), index_mask=index_mask, column_mask=column_mask,
missing_index='raise', missing_columns='nan')
with pytest.raises(Exception) as e_info:
MyData.download([0, 1], shape=(5, 3), index_mask=index_mask, column_mask=column_mask,
missing_index='nan', missing_columns='raise')
with pytest.raises(Exception) as e_info:
MyData.download([0, 1], shape=(5, 3), index_mask=index_mask, column_mask=column_mask,
missing_index='test', missing_columns='nan')
with pytest.raises(Exception) as e_info:
MyData.download([0, 1], shape=(5, 3), index_mask=index_mask, column_mask=column_mask,
missing_index='nan', missing_columns='test')
def test_update(self):
pd.testing.assert_series_equal(
MyData.download(0, shape=(5,), return_arr=True).update().data[0],
pd.Series(
[
0.3745401188473625,
0.9507143064099162,
0.7319939418114051,
0.5986584841970366,
0.11505456638977896
]
)
)
pd.testing.assert_series_equal(
MyData.download(0, shape=(5,), return_arr=True).update(n=2).data[0],
pd.Series(
[
0.3745401188473625,
0.9507143064099162,
0.7319939418114051,
0.5986584841970366,
0.11505456638977896,
0.6090665392794814
]
)
)
pd.testing.assert_frame_equal(
MyData.download(0, shape=(5, 3), return_arr=True).update().data[0],
pd.DataFrame(
[
[0.3745401188473625, 0.9507143064099162, 0.7319939418114051],
[0.5986584841970366, 0.15601864044243652, 0.15599452033620265],
[0.05808361216819946, 0.8661761457749352, 0.6011150117432088],
[0.7080725777960455, 0.020584494295802447, 0.9699098521619943],
[0.11505456638977896, 0.6090665392794814, 0.13339096418598828]
]
)
)
pd.testing.assert_frame_equal(
MyData.download(0, shape=(5, 3), return_arr=True).update(n=2).data[0],
pd.DataFrame(
[
[0.3745401188473625, 0.9507143064099162, 0.7319939418114051],
[0.5986584841970366, 0.15601864044243652, 0.15599452033620265],
[0.05808361216819946, 0.8661761457749352, 0.6011150117432088],
[0.7080725777960455, 0.020584494295802447, 0.9699098521619943],
[0.11505456638977896, 0.6090665392794814, 0.13339096418598828],
[0.24058961996534878, 0.3271390558111398, 0.8591374909485977]
]
)
)
index = pd.DatetimeIndex(
['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'],
dtype='datetime64[ns]',
freq='D'
)
pd.testing.assert_series_equal(
MyData.download(0, shape=(5,)).update().data[0],
pd.Series(
[
0.3745401188473625,
0.9507143064099162,
0.7319939418114051,
0.5986584841970366,
0.11505456638977896
],
index=index
)
)
index2 = pd.DatetimeIndex(
['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05', '2020-01-06'],
dtype='datetime64[ns]',
freq='D'
)
pd.testing.assert_series_equal(
MyData.download(0, shape=(5,)).update(n=2).data[0],
pd.Series(
[
0.3745401188473625,
0.9507143064099162,
0.7319939418114051,
0.5986584841970366,
0.11505456638977896,
0.6090665392794814
],
index=index2
)
)
tzaware_index = pd.DatetimeIndex(
[
'2020-01-01 01:00:00',
'2020-01-02 01:00:00',
'2020-01-03 01:00:00',
'2020-01-04 01:00:00',
'2020-01-05 01:00:00'
],
dtype='datetime64[ns, Europe/Berlin]',
freq='D'
)
pd.testing.assert_series_equal(
MyData.download(0, shape=(5,), tz_localize='UTC', tz_convert='Europe/Berlin')
.update(tz_localize=None).data[0],
pd.Series(
[
0.3745401188473625,
0.9507143064099162,
0.7319939418114051,
0.5986584841970366,
0.11505456638977896
],
index=tzaware_index
)
)
index_mask = vbt.symbol_dict({
0: [False, True, True, True, True],
1: [True, True, True, True, False]
})
update_index_mask = vbt.symbol_dict({
0: [True],
1: [False]
})
pd.testing.assert_series_equal(
MyData.download([0, 1], shape=(5,), index_mask=index_mask, missing_index='nan')
.update(index_mask=update_index_mask).data[0],
pd.Series(
[
np.nan,
0.9507143064099162,
0.7319939418114051,
0.5986584841970366,
0.11505456638977896
],
index=index
)
)
pd.testing.assert_series_equal(
MyData.download([0, 1], shape=(5,), index_mask=index_mask, missing_index='nan')
.update(index_mask=update_index_mask).data[1],
pd.Series(
[
1.3745401188473625,
1.9507143064099162,
1.7319939418114051,
1.5986584841970366,
np.nan
],
index=index
)
)
update_index_mask2 = vbt.symbol_dict({
0: [True, False],
1: [False, True]
})
pd.testing.assert_series_equal(
MyData.download([0, 1], shape=(5,), index_mask=index_mask, missing_index='nan')
.update(n=2, index_mask=update_index_mask2).data[0],
pd.Series(
[
np.nan,
0.9507143064099162,
0.7319939418114051,
0.5986584841970366,
0.11505456638977896,
np.nan
],
index=index2
)
)
pd.testing.assert_series_equal(
MyData.download([0, 1], shape=(5,), index_mask=index_mask, missing_index='nan')
.update(n=2, index_mask=update_index_mask2).data[1],
pd.Series(
[
1.3745401188473625,
1.9507143064099162,
1.7319939418114051,
1.5986584841970366,
np.nan,
1.6090665392794814
],
index=index2
)
)
pd.testing.assert_series_equal(
MyData.download([0, 1], shape=(5,), index_mask=index_mask, missing_index='drop')
.update(index_mask=update_index_mask).data[0],
pd.Series(
[
0.9507143064099162,
0.7319939418114051,
0.5986584841970366
],
index=index[1:4]
)
)
pd.testing.assert_series_equal(
MyData.download([0, 1], shape=(5,), index_mask=index_mask, missing_index='drop')
.update(index_mask=update_index_mask).data[1],
pd.Series(
[
1.9507143064099162,
1.7319939418114051,
1.5986584841970366
],
index=index[1:4]
)
)
pd.testing.assert_series_equal(
MyData.download([0, 1], shape=(5,), index_mask=index_mask, missing_index='drop')
.update(n=2, index_mask=update_index_mask2).data[0],
pd.Series(
[
0.9507143064099162,
0.7319939418114051,
0.5986584841970366
],
index=index[1:4]
)
)
pd.testing.assert_series_equal(
MyData.download([0, 1], shape=(5,), index_mask=index_mask, missing_index='drop')
.update(n=2, index_mask=update_index_mask2).data[1],
pd.Series(
[
1.9507143064099162,
1.7319939418114051,
1.5986584841970366
],
index=index[1:4]
)
)
column_mask = vbt.symbol_dict({
0: [False, True, True],
1: [True, True, False]
})
pd.testing.assert_frame_equal(
MyData.download([0, 1], shape=(5, 3), index_mask=index_mask, column_mask=column_mask,
missing_index='nan', missing_columns='nan')
.update(index_mask=update_index_mask).data[0],
pd.DataFrame(
[
[np.nan, np.nan, np.nan],
[np.nan, 0.15601864044243652, 0.15599452033620265],
[np.nan, 0.8661761457749352, 0.6011150117432088],
[np.nan, 0.020584494295802447, 0.9699098521619943],
[np.nan, 0.6090665392794814, 0.13339096418598828]
],
index=index
)
)
pd.testing.assert_frame_equal(
MyData.download([0, 1], shape=(5, 3), index_mask=index_mask, column_mask=column_mask,
missing_index='nan', missing_columns='nan')
.update(index_mask=update_index_mask).data[1],
pd.DataFrame(
[
[1.3745401188473625, 1.9507143064099162, np.nan],
[1.5986584841970366, 1.15601864044243652, np.nan],
[1.05808361216819946, 1.8661761457749352, np.nan],
[1.7080725777960455, 1.020584494295802447, np.nan],
[np.nan, np.nan, np.nan]
],
index=index
)
)
pd.testing.assert_frame_equal(
MyData.download([0, 1], shape=(5, 3), index_mask=index_mask, column_mask=column_mask,
missing_index='nan', missing_columns='nan')
.update(n=2, index_mask=update_index_mask2).data[0],
pd.DataFrame(
[
[np.nan, np.nan, np.nan],
[np.nan, 0.15601864044243652, 0.15599452033620265],
[np.nan, 0.8661761457749352, 0.6011150117432088],
[np.nan, 0.020584494295802447, 0.9699098521619943],
[np.nan, 0.6090665392794814, 0.13339096418598828],
[np.nan, np.nan, np.nan]
],
index=index2
)
)
pd.testing.assert_frame_equal(
MyData.download([0, 1], shape=(5, 3), index_mask=index_mask, column_mask=column_mask,
missing_index='nan', missing_columns='nan')
.update(n=2, index_mask=update_index_mask2).data[1],
pd.DataFrame(
[
[1.3745401188473625, 1.9507143064099162, np.nan],
[1.5986584841970366, 1.15601864044243652, np.nan],
[1.05808361216819946, 1.8661761457749352, np.nan],
[1.7080725777960455, 1.020584494295802447, np.nan],
[np.nan, np.nan, np.nan],
[1.2405896199653488, 1.3271390558111398, np.nan]
],
index=index2
)
)
pd.testing.assert_frame_equal(
MyData.download([0, 1], shape=(5, 3), index_mask=index_mask, column_mask=column_mask,
missing_index='drop', missing_columns='drop')
.update(index_mask=update_index_mask).data[0],
pd.DataFrame(
[
[0.15601864044243652],
[0.8661761457749352],
[0.020584494295802447]
],
index=index[1:4],
columns=pd.Int64Index([1], dtype='int64')
)
)
pd.testing.assert_frame_equal(
MyData.download([0, 1], shape=(5, 3), index_mask=index_mask, column_mask=column_mask,
missing_index='drop', missing_columns='drop')
.update(index_mask=update_index_mask).data[1],
pd.DataFrame(
[
[1.15601864044243652],
[1.8661761457749352],
[1.020584494295802447]
],
index=index[1:4],
columns=pd.Int64Index([1], dtype='int64')
)
)
pd.testing.assert_frame_equal(
MyData.download([0, 1], shape=(5, 3), index_mask=index_mask, column_mask=column_mask,
missing_index='drop', missing_columns='drop')
.update(n=2, index_mask=update_index_mask2).data[0],
pd.DataFrame(
[
[0.15601864044243652],
[0.8661761457749352],
[0.020584494295802447]
],
index=index[1:4],
columns=pd.Int64Index([1], dtype='int64')
)
)
pd.testing.assert_frame_equal(
MyData.download([0, 1], shape=(5, 3), index_mask=index_mask, column_mask=column_mask,
missing_index='drop', missing_columns='drop')
.update(n=2, index_mask=update_index_mask2).data[1],
pd.DataFrame(
[
[1.15601864044243652],
[1.8661761457749352],
[1.020584494295802447]
],
index=index[1:4],
columns=pd.Int64Index([1], dtype='int64')
)
)
def test_concat(self):
index = pd.DatetimeIndex(
['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'],
dtype='datetime64[ns]',
freq='D'
)
pd.testing.assert_series_equal(
MyData.download(0, shape=(5,), columns='feat0').concat()['feat0'],
pd.Series(
[
0.3745401188473625,
0.9507143064099162,
0.7319939418114051,
0.5986584841970366,
0.15601864044243652
],
index=index,
name=0
)
)
pd.testing.assert_frame_equal(
MyData.download([0, 1], shape=(5,), columns='feat0').concat()['feat0'],
pd.DataFrame(
[
[0.3745401188473625, 1.3745401188473625],
[0.9507143064099162, 1.9507143064099162],
[0.7319939418114051, 1.7319939418114051],
[0.5986584841970366, 1.5986584841970366],
[0.15601864044243652, 1.15601864044243652]
],
index=index,
columns=pd.Int64Index([0, 1], dtype='int64', name='symbol')
)
)
pd.testing.assert_series_equal(
MyData.download(0, shape=(5, 3), columns=['feat0', 'feat1', 'feat2']).concat()['feat0'],
pd.Series(
[
0.3745401188473625,
0.5986584841970366,
0.05808361216819946,
0.7080725777960455,
0.8324426408004217
],
index=index,
name=0
)
)
pd.testing.assert_series_equal(
MyData.download(0, shape=(5, 3), columns=['feat0', 'feat1', 'feat2']).concat()['feat1'],
pd.Series(
[
0.9507143064099162,
0.15601864044243652,
0.8661761457749352,
0.020584494295802447,
0.21233911067827616
],
index=index,
name=0
)
)
pd.testing.assert_series_equal(
MyData.download(0, shape=(5, 3), columns=['feat0', 'feat1', 'feat2']).concat()['feat2'],
pd.Series(
[
0.7319939418114051,
0.15599452033620265,
0.6011150117432088,
0.9699098521619943,
0.18182496720710062
],
index=index,
name=0
)
)
pd.testing.assert_frame_equal(
MyData.download([0, 1], shape=(5, 3), columns=['feat0', 'feat1', 'feat2']).concat()['feat0'],
pd.DataFrame(
[
[0.3745401188473625, 1.3745401188473625],
[0.5986584841970366, 1.5986584841970366],
[0.05808361216819946, 1.05808361216819946],
[0.7080725777960455, 1.7080725777960455],
[0.8324426408004217, 1.8324426408004217]
],
index=index,
columns=pd.Int64Index([0, 1], dtype='int64', name='symbol')
)
)
pd.testing.assert_frame_equal(
MyData.download([0, 1], shape=(5, 3), columns=['feat0', 'feat1', 'feat2']).concat()['feat1'],
pd.DataFrame(
[
[0.9507143064099162, 1.9507143064099162],
[0.15601864044243652, 1.15601864044243652],
[0.8661761457749352, 1.8661761457749352],
[0.020584494295802447, 1.020584494295802447],
[0.21233911067827616, 1.21233911067827616]
],
index=index,
columns=pd.Int64Index([0, 1], dtype='int64', name='symbol')
)
)
pd.testing.assert_frame_equal(
MyData.download([0, 1], shape=(5, 3), columns=['feat0', 'feat1', 'feat2']).concat()['feat2'],
pd.DataFrame(
[
[0.7319939418114051, 1.7319939418114051],
[0.15599452033620265, 1.15599452033620265],
[0.6011150117432088, 1.6011150117432088],
[0.9699098521619943, 1.9699098521619943],
[0.18182496720710062, 1.18182496720710062]
],
index=index,
columns=pd.Int64Index([0, 1], dtype='int64', name='symbol')
)
)
def test_get(self):
index = pd.DatetimeIndex(
['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'],
dtype='datetime64[ns]',
freq='D'
)
pd.testing.assert_series_equal(
MyData.download(0, shape=(5,), columns='feat0').get(),
pd.Series(
[
0.3745401188473625,
0.9507143064099162,
0.7319939418114051,
0.5986584841970366,
0.15601864044243652
],
index=index,
name='feat0'
)
)
pd.testing.assert_frame_equal(
MyData.download(0, shape=(5, 3), columns=['feat0', 'feat1', 'feat2']).get(),
pd.DataFrame(
[
[0.3745401188473625, 0.9507143064099162, 0.7319939418114051],
[0.5986584841970366, 0.15601864044243652, 0.15599452033620265],
[0.05808361216819946, 0.8661761457749352, 0.6011150117432088],
[0.7080725777960455, 0.020584494295802447, 0.9699098521619943],
[0.8324426408004217, 0.21233911067827616, 0.18182496720710062]
],
index=index,
columns=pd.Index(['feat0', 'feat1', 'feat2'], dtype='object')
)
)
pd.testing.assert_series_equal(
MyData.download(0, shape=(5, 3), columns=['feat0', 'feat1', 'feat2']).get('feat0'),
pd.Series(
[
0.3745401188473625,
0.5986584841970366,
0.05808361216819946,
0.7080725777960455,
0.8324426408004217
],
index=index,
name='feat0'
)
)
pd.testing.assert_frame_equal(
MyData.download([0, 1], shape=(5,), columns='feat0').get(),
pd.DataFrame(
[
[0.3745401188473625, 1.3745401188473625],
[0.9507143064099162, 1.9507143064099162],
[0.7319939418114051, 1.7319939418114051],
[0.5986584841970366, 1.5986584841970366],
[0.15601864044243652, 1.15601864044243652]
],
index=index,
columns=pd.Int64Index([0, 1], dtype='int64', name='symbol')
)
)
pd.testing.assert_frame_equal(
MyData.download([0, 1], shape=(5, 3), columns=['feat0', 'feat1', 'feat2']).get('feat0'),
pd.DataFrame(
[
[0.3745401188473625, 1.3745401188473625],
[0.5986584841970366, 1.5986584841970366],
[0.05808361216819946, 1.05808361216819946],
[0.7080725777960455, 1.7080725777960455],
[0.8324426408004217, 1.8324426408004217]
],
index=index,
columns=pd.Int64Index([0, 1], dtype='int64', name='symbol')
)
)
pd.testing.assert_frame_equal(
MyData.download([0, 1], shape=(5, 3), columns=['feat0', 'feat1', 'feat2']).get(['feat0', 'feat1'])[0],
pd.DataFrame(
[
[0.3745401188473625, 1.3745401188473625],
[0.5986584841970366, 1.5986584841970366],
[0.05808361216819946, 1.05808361216819946],
[0.7080725777960455, 1.7080725777960455],
[0.8324426408004217, 1.8324426408004217]
],
index=index,
columns=pd.Int64Index([0, 1], dtype='int64', name='symbol')
)
)
import sys
import pandas
from decisionengine_modules.glideinwms.transforms.grid_figure_of_merit import GridFigureOfMerit
grid_entries = ["g1", "g2", "g3", "g4", "g5"]
running = [5, 10, 15, 20, 200]
max_allowed = [10, 10, 10, 2000, 500]
idle = [20, 3, 4, 5, 6]
max_idle = [10, 10, 10, 10, 10]
entries = {
"EntryName": grid_entries,
"GlideinMonitorTotalStatusRunning": running,
"GlideinConfigPerEntryMaxGlideins": max_allowed,
"GlideinMonitorTotalStatusIdle": idle,
"GlideinConfigPerEntryMaxIdle": max_idle,
}
grid_df = pandas.DataFrame(entries)
import pandas as pd
import numpy as np
import scipy
import seaborn as sns
import matplotlib.pyplot as plt
import os
from functools import reduce
from statsmodels.tsa.stattools import coint
sns.set(style='white')
# Retrieve intraday price data and combine them into a DataFrame.
# 1. Load downloaded prices from folder into a list of dataframes.
folder_path = 'STATICS/PRICE'
file_names = os.listdir(folder_path)
tickers = [name.split('.')[0] for name in file_names]
df_list = [pd.read_csv(os.path.join(folder_path, name)) for name in file_names]
# 2. Replace the closing price column name by the ticker.
for i in range(len(df_list)):
df_list[i].rename(columns={'close': tickers[i]}, inplace=True)
# 3. Merge all price dataframes. Extract roughly the first 70% data.
df = reduce(lambda x, y: pd.merge(x, y, on='date'), df_list)
idx = round(len(df) * 0.7)
df = df.iloc[:idx, :]
# Calculate and plot price correlations.
pearson_corr = df[tickers].corr()
sns.clustermap(pearson_corr).fig.suptitle('Pearson Correlations')
# Plot the marginal distributions.
sns.set(style='darkgrid')
sns.jointplot(df['JNJ'], df['PG'], kind='hex', color='#2874A6')
sns.jointplot(df['KO'], df['PEP'], kind='hex', color='#2C3E50')
# Calculate the p-value of cointegration test for JNJ-PG and KO-PEP pairs.
x = df['JNJ']
y = df['PG']
_, p_value, _ = coint(x, y)
print('The p_value of JNJ-PG pair cointegration is: {}'.format(p_value))
x = df['KO']
y = df['PEP']
_, p_value, _ = coint(x, y)
print('The p_value of KO-PEP pair cointegration is: {}'.format(p_value))
# Plot the linear relationship of the JNJ-PG pair.
df2 = df[['JNJ', 'PG']].copy()
spread = df2['JNJ'] - df2['PG']
mean_spread = spread.mean()
df2['Dev'] = spread - mean_spread
rnd = np.random.choice(len(df), size=500)
sns.scatterplot(x='JNJ', y='PG', hue='Dev', linewidth=0.3, alpha=0.8,
data=df2.iloc[rnd, :]).set_title('JNJ-PG Price Relationship')
# Plot the linear relationship of the KO-PEP pair.
df2 = df[['KO', 'PEP']].copy()
spread = df2['KO'] - df2['PEP']
mean_spread = spread.mean()
df2['Dev'] = spread - mean_spread
rnd = np.random.choice(len(df), size=500)
sns.scatterplot(x='KO', y='PEP', hue='Dev', linewidth=0.3, alpha=0.8,
data=df2.iloc[rnd, :]).set_title('KO-PEP Price Relationship')
# Plot the historical JNJ-PG prices and the spreads for a sample period.
def plot_spread(df, ticker1, ticker2, idx, th, stop):
px1 = df[ticker1].iloc[idx] / df[ticker1].iloc[idx[0]]
px2 = df[ticker2].iloc[idx] / df[ticker2].iloc[idx[0]]
sns.set(style='white')
# Set plotting figure
fig, ax = plt.subplots(2, 1, gridspec_kw={'height_ratios': [2, 1]})
# Plot the 1st subplot
sns.lineplot(data=[px1, px2], linewidth=1.2, ax=ax[0])
ax[0].legend(loc='upper left')
# Calculate the spread and other thresholds
spread = df[ticker1].iloc[idx] - df[ticker2].iloc[idx]
mean_spread = spread.mean()
sell_th = mean_spread + th
buy_th = mean_spread - th
sell_stop = mean_spread + stop
buy_stop = mean_spread - stop
# Plot the 2nd subplot
sns.lineplot(data=spread, color='#85929E', ax=ax[1], linewidth=1.2)
ax[1].axhline(sell_th, color='b', ls='--', linewidth=1, label='sell_th')
ax[1].axhline(buy_th, color='r', ls='--', linewidth=1, label='buy_th')
ax[1].axhline(sell_stop, color='g', ls='--', linewidth=1, label='sell_stop')
ax[1].axhline(buy_stop, color='y', ls='--', linewidth=1, label='buy_stop')
ax[1].fill_between(idx, sell_th, buy_th, facecolors='r', alpha=0.3)
ax[1].legend(loc='upper left', labels=['Spread', 'sell_th', 'buy_th', 'sell_stop', 'buy_stop'], prop={'size':6.5})
idx = range(11000, 12000)
plot_spread(df, 'JNJ', 'PG', idx, 0.5, 1)
idx = range(13000, 14000)
plot_spread(df, 'KO', 'PEP', idx, 0.5, 1)
# Generate correlated time-series.
# 1. Simulate 1000 correlated random variables by Cholesky Decomposition.
corr = np.array([[1.0, 0.9],
[0.9, 1.0]])
L = scipy.linalg.cholesky(corr)
rnd = np.random.normal(0, 1, size=(1000, 2))
out = rnd @ L
# 2. Simulate GBM returns and prices.
dt = 1/252
base1 = 110; mu1 = 0.03; sigma1 = 0.05
base2 = 80; mu2 = 0.01; sigma2 = 0.03
ret1 = np.exp((mu1 - 0.5 * (sigma1 ** 2) ) * dt + sigma1 * out[:, 0] * np.sqrt(dt))
ret2 = np.exp((mu2 - 0.5 * (sigma2 ** 2) ) * dt + sigma2 * out[:, 1] * np.sqrt(dt))
price1 = base1 * np.cumprod(ret1)
price2 = base2 * np.cumprod(ret2)
# 3. Calculate the return correlation and the p-value for cointegration testing.
corr_ret , _ = scipy.stats.pearsonr(ret1, ret2)
corr_price , _ = scipy.stats.pearsonr(price1, price2)
_, p_value, _ = coint(price1, price2)
print('GBM simulation result - return correlation: {}'.format(corr_ret))
print('GBM simulation result - price correlation: {}'.format(corr_price))
print('GBM simulation result - p-value for cointegration testing: {}'.format(p_value))
# 4. Plot the results.
df_gbm = pd.DataFrame()
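# Added sketch (assumption): the original script presumably went on to plot the
# simulated paths; a minimal version is shown here.
df_gbm['price1'] = price1
df_gbm['price2'] = price2
df_gbm.plot(title='Simulated GBM prices from correlated returns', linewidth=1.2)
plt.show()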
import numpy as np
import pandas as pd
import pdb
import os
import math
import argparse
if __name__ == '__main__':
#edit the directory
muat_dir = '/users/primasan/projects/muat/'
metadata = pd.read_csv(muat_dir + 'extfile/metadata_icgc_pcawg.tsv',sep='\t',index_col=0)
dictMutation = pd.read_csv(muat_dir + 'extfile/dictMutation.csv',index_col=0)
dictChpos = pd.read_csv(muat_dir + 'extfile/dictChpos.csv',index_col=0)
dictGES = pd.read_csv(muat_dir + 'extfile/dictGES.csv',index_col=0)
pcawg_dir = '/scratch/project_2001668/data/tcga/alltcga/'
simplified_data = '/scratch/project_2001668/data/tcga/simplified/'
tokenized_data = '/scratch/project_2001668/data/tcga/tokenized/'
all_class = os.listdir(pcawg_dir)
pd_all = []
for i in all_class:
pcawg_histology = i
allsamples = os.listdir(simplified_data + pcawg_histology)
for j in allsamples:
onesamples = j
onerow = (pcawg_histology,onesamples)
pd_all.append(onerow)
pd_allsamp = pd.DataFrame(pd_all)
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 11 22:34:12 2021
@author: orkun
"""
import numpy as np
import pandas as pd
from sklearn.impute import SimpleImputer
from pandas.api.types import is_numeric_dtype
from sklearn import preprocessing
from PyQt5.QtWidgets import QMessageBox
from PyQt5.QtCore import QSize
from graph_view import graphics
class processes(object):
def __init__(self, process_list, export_list, dataframe, col_list, miss_list):
self.process_list = process_list
self.export_list = export_list
self.dataframe = dataframe
self.col_list = col_list
self.miss_list = miss_list
def process_steps(self):
self.procs_dic = {0: self.imput_data_get, 1: self.out_iqr_get, 2: self.scale_data_get}
if self.export_list[1]:
self.graph_class=graphics()
for index, item in enumerate(self.process_list):
if item:
processed_data = self.procs_dic[index]()
if processed_data is not None:
self.dataframe = processed_data
else:
return None
return self.dataframe
def imput_data_get(self):
num_imp = SimpleImputer(missing_values=np.nan, strategy='mean')
cat_imp = SimpleImputer(missing_values=np.nan, strategy='most_frequent')
imputed_data = self.dataframe.copy()
for col_name in self.col_list:
if is_numeric_dtype(self.dataframe[col_name].dtypes):
imputer = num_imp.fit_transform(pd.DataFrame(self.dataframe[col_name]))
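# Usage sketch (assumption: illustrative only; the flag lists and frame below are
# hypothetical, not part of the original module).
# proc = processes(process_list=[True, True, False], export_list=[False, False],
#                  dataframe=df, col_list=list(df.columns), miss_list=[])
# cleaned_df = proc.process_steps()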
import pandas as pd
import numpy as np
import re
#===============================================================================
def cleandots(x,mark):
if str(x) == mark : # The mark was a '.' in HW 3 use case.
return np.NaN
else:
return str (x)
def cleandotsincolumn (series,mark):
return series.apply (lambda x : cleandots (x,mark))
def cleandotsindataframe (mydataframe,mark):
columns = mydataframe.columns
for col in columns:
mydataframe [col] = cleandotsincolumn (mydataframe [col],mark)
return mydataframe
#===============================================================================
def rename(df,col,name1 , name2):
df[col][df[col] == name1] = name2
def WantedCols (df,wantedcol):
columns = df.columns
for col in columns:
if col not in wantedcol:
del df[col]
return df
def unWantedCols (df,unwanted):
for col in unwanted:
del df [col]
return df
#===============================================================================
def onlyNum(variable):
try :
variable = round (float (variable),6)
return [variable if type (variable) == int or type (variable) == float else np.NaN] [0]
except:
variable = np.NaN
return variable
# if type(variable) == int or type(variable) == float:
# return variable
# else:
# return np.NaN
def dfonlynum (df,*columns):
for col in columns:
df [col] = df [col].apply (onlyNum)
return df
def NumMask (df,col,x,y): # Select a set of numbers from a column. I used it for Ranks.
Numberlist= [i for i in range (x,y+1)]
mask= df [col].apply(lambda x : x in Numberlist)
return mask
#===============================================================================
def catchpattern(df,col,mypattern):
return df [col].apply (lambda x: re.findall(mypattern,str(x)) [0])
def splitname (df,col,splitchar):
return df [col].apply (lambda x : str(x).split (splitchar) [0].rstrip()) # Removing explanations (descriptions)
#===============================================================================
def print_full_rows(x):
pd.set_option('display.max_rows', len(x))
print(x)
pd.reset_option('display.max_rows')
def print_full_col (x):
pd.set_option('display.max_columns', len(x.columns))
print(x)
pd.reset_option('display.max_columns')
def print_full (x) :
pd.set_option('display.max_rows', len(x))
pd.set_option('display.max_columns', len(x.columns))
print(x)
pd.reset_option('display.max_rows')
pd.reset_option('display.max_columns')
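# Usage sketch (assumption: df is an arbitrary DataFrame, '.' marks missing values,
# and the column name 'Rank' is hypothetical).
# df = cleandotsindataframe(df, '.') # '.' placeholders -> NaN
# df = dfonlynum(df, 'Rank') # coerce column to floats, NaN otherwise
# print_full(df[NumMask(df, 'Rank', 1, 10)]) # rows with Rank 1..10, shown in full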
# -*- coding: utf-8 -*-
"""
Tools for calculating the fatigue damage equivalent PSD. Adapted and
enhanced from the CAM versions.
"""
from types import SimpleNamespace
import itertools as it
import multiprocessing as mp
import numpy as np
import scipy.signal as signal
import pandas as pd
from pyyeti import cyclecount, srs, dsp
WN_ = None
SIG_ = None
ASV_ = None
BinAmps_ = None
Count_ = None
def _to_np_array(sh_arr):
return np.frombuffer(sh_arr[0]).reshape(sh_arr[1])
def _mk_par_globals(wn, sig, asv, binamps, count):
global WN_, SIG_, ASV_, BinAmps_, Count_
WN_ = _to_np_array(wn)
SIG_ = _to_np_array(sig)
ASV_ = _to_np_array(asv)
BinAmps_ = _to_np_array(binamps)
Count_ = _to_np_array(count)
def _dofde(args):
"""Utility routine for parallel processing"""
(j, (coeffunc, Q, dT, verbose)) = args
if verbose:
print(f"Processing frequency {WN_[j] / 2 / np.pi:8.2f} Hz", end="\r")
b, a = coeffunc(Q, dT, WN_[j])
resphist = signal.lfilter(b, a, SIG_)
ASV_[1, j] = abs(resphist).max()
ASV_[2, j] = np.var(resphist, ddof=1)
# use rainflow to count cycles:
ind = cyclecount.findap(resphist)
rf = cyclecount.rainflow(resphist[ind])
amp = rf["amp"]
count = rf["count"]
ASV_[0, j] = amp.max()
BinAmps_[j] *= ASV_[0, j]
# cumulative bin count:
for jj in range(BinAmps_.shape[1]):
pv = amp >= BinAmps_[j, jj]
Count_[j, jj] = np.sum(count[pv])
def fdepsd(
sig,
sr,
freq,
Q,
resp="absacce",
hpfilter=5.0,
winends="auto",
nbins=300,
T0=60.0,
rolloff="lanczos",
ppc=12,
parallel="auto",
maxcpu=14,
verbose=False,
):
r"""
Compute a fatigue damage equivalent PSD from a signal.
Parameters
----------
sig : 1d array_like
Base acceleration signal.
sr : scalar
Sample rate.
freq : array_like
Frequency vector in Hz. This defines the single DOF (SDOF)
systems to use.
Q : scalar > 0.5
Dynamic amplification factor :math:`Q = 1/(2\zeta)` where
:math:`\zeta` is the fraction of critical damping.
resp : string; optional
The type of response to base the damage calculations on:
========= =======================================
`resp` Damage is based on
========= =======================================
'absacce' absolute acceleration [#fde1]_
'pvelo' pseudo velocity [#fde2]_
========= =======================================
hpfilter : scalar or None; optional
High pass filter frequency; if None, no filtering is done.
If filtering is done, it is a 3rd order butterworth via
:func:`scipy.signal.lfilter`.
winends : None or 'auto' or dictionary; optional
If None, :func:`pyyeti.dsp.windowends` is not called. If
'auto', :func:`pyyeti.dsp.windowends` is called to apply a
0.25 second window or a 50 point window (whichever is smaller)
to the front. Otherwise, `winends` must be a dictionary of
arguments that will be passed to :func:`pyyeti.dsp.windowends`
(not including `signal`).
nbins : integer; optional
The number of amplitude levels at which to count cycles
T0 : scalar; optional
Specifies test duration in seconds
rolloff : string or function or None; optional
Indicate which method to use to account for the SRS roll off
when the minimum `ppc` value is not met. Either 'fft' or
'lanczos' seem the best. If a string, it must be one of these
values:
=========== ==========================================
`rolloff` Notes
=========== ==========================================
'fft' Use FFT to upsample data as needed. See
:func:`scipy.signal.resample`.
'lanczos' Use Lanczos resampling to upsample as
needed. See :func:`pyyeti.dsp.resample`.
'prefilter' Apply a high freq. gain filter to account
for the SRS roll-off. See
:func:`pyyeti.srs.preroll` for more
information. This option ignores `ppc`.
'linear' Use linear interpolation to increase the
points per cycle (this is not recommended;
method; it's only here as a test case).
'none' Don't do anything to enforce the minimum
`ppc`. Note error bounds listed above.
None Same as 'none'.
=========== ==========================================
If a function, the call signature is:
``sig_new, sr_new = rollfunc(sig, sr, ppc, frq)``. Here, `sig`
is 1d, len(time). The last three inputs are scalars. For
example, the 'fft' function is (trimmed of documentation)::
def fftroll(sig, sr, ppc, frq):
N = sig.shape[0]
if N > 1:
curppc = sr/frq
factor = int(np.ceil(ppc/curppc))
sig = signal.resample(sig, factor*N, axis=0)
sr *= factor
return sig, sr
ppc : scalar; optional
Specifies the minimum points per cycle for SRS calculations.
See also `rolloff`.
====== ==================================
`ppc` Maximum error at highest frequency
====== ==================================
3 81.61%
4 48.23%
5 31.58%
10 8.14% (minimum recommended `ppc`)
12 5.67%
15 3.64%
20 2.05%
25 1.31%
50 0.33%
====== ==================================
parallel : string; optional
Controls the parallelization of the calculations:
========== ============================================
`parallel` Notes
========== ============================================
'auto' Routine determines whether or not to run
parallel.
'no' Do not use parallel processing.
'yes' Use parallel processing. Beware, depending
on the particular problem, using parallel
processing can be slower than not using it.
On Windows, be sure the :func:`fdepsd` call
is contained within:
``if __name__ == "__main__":``
========== ============================================
maxcpu : integer or None; optional
Specifies maximum number of CPUs to use. If None, it is
internally set to 4/5 of available CPUs (as determined from
:func:`multiprocessing.cpu_count`).
verbose : bool; optional
If True, routine will print some status information.
Returns
-------
A SimpleNamespace with the members:
freq : 1d ndarray
Same as input `freq`.
psd : pandas DataFrame; ``len(freq) x 5``
The amplitude and damage based PSDs. The index is `freq` and
the five columns are: [G1, G2, G4, G8, G12]
=========== ===============================================
Name Description
=========== ===============================================
G1 The "G1" PSD (Mile's or similar equivalent from
SRS); uses the maximum cycle amplitude instead
of the raw SRS peak for each frequency. G1 is
not a damage-based PSD.
G2 The "G2" PSD of reference [#fde1]_; G2 >= G1 by
bounding lower amplitude counts down to 1/3 of
the maximum cycle amplitude. G2 is not a
damage-based PSD.
G4, G8, G12 The damage-based PSDs with fatigue exponents of
4, 8, and 12
=========== ===============================================
peakamp : pandas DataFrame; ``len(freq) x 5``
The peak response of SDOFs (single DOF oscillators) using each
PSD as a base input. The index and the five columns are the
same as for `psd`. The peaks are computed from the Mile's
equation (or similar if using ``resp='pvelo'``). The peak
factor used is ``sqrt(2*log(f*T0))``. Note that the first
column is, by definition, the maximum cycle amplitude for each
SDOF from the rainflow count (G1 was calculated from
this). Typically, this should be very close to the raw SRS
peaks contained in the `srs` output but a little lower since
SRS just grabs peaks without consideration of the opposite
peak.
binamps : pandas DataFrame; ``len(freq) x nbins``
A DataFrame of linearly spaced amplitude values defining the
cycle counting bins. The index is `freq` and the columns are
integers 0 to ``nbins - 1``. The values in each row (for a
specific frequency SDOF), range from 0.0 up to
``peakamp.loc[freq, "G1"] * (nbins - 1) / nbins``. In other
words, each value is the left-side amplitude boundary for that
bin. The next column for this matrix would be ``peakamp.loc[:,
"G1"]``.
count : pandas DataFrame; ``len(freq) x nbins``
Summary matrix of the rainflow cycle counts. Size corresponds
with `binamps` and the count is cumulative; that is, the count
in each entry includes cycles at the `binamps` amplitude and
above. Therefore, first column has total cycles for the SDOF.
bincount : pandas DataFrame; ``len(freq) x nbins``
Non-cumulative version of `count`. In other words, the values
are the number of cycles in the bin, left-side inclusive. The
last bin includes the count of maximum amplitude cycles.
di_sig : pandas DataFrame; ``len(freq) x 3``
Damage indicators computed from SDOF responses to the `sig`
signal. Index is `freq` and columns are ['b=4', 'b=8',
'b=12']. The value for each frequency is the sum of the cycle
count for a bin times its amplitude to the b power. That is,
for the j-th frequency, the indicator is::
amps = binamps.loc[freq[j]]
counts = bincount.loc[freq[j]]
di = (amps ** b) @ counts # dot product of two vectors
Note that this definition is slightly different than equation
14 from [#fde1]_ (would have to divide by the frequency), but
the same as equation 10 of [#fde2]_ without the constant.
di_test_part : pandas DataFrame; ``len(freq) x 3``
Test damage indicator without including the variance factor
(see note). Same size as `di_sig`. Each value depends only on
the frequency, `T0`, and the fatigue exponent ``b``. The ratio
of a signal damage indicator to the corresponding partial test
damage indicator is equal to the variance of the single DOF
response to the test raised to the ``b / 2`` power::
var_test ** (b / 2) = di_sig / di_test_part
.. note::
If the variance vactor (`var_test`) were included, then
the test damage indicator would be the same as
`di_sig`. This relationship is the basis of determining
the amplitude of the test signal.
var_test : pandas DataFrame; ``len(freq) x 3``
The required SDOF test response variances (see `di_test_part`
description). Same size as `di_sig`. The amplitude of the G4,
G8, and G12 columns of `psd` are computed from Mile's equation
(or similar) and `var_test`.
sig : 1d ndarray
The version of the input `sig` that is fed into the fatique
damage algorithm. This would be after any filtering,
windowing, and upsampling.
sr : scalar
The sample rate of the output `sig`.
srs : pandas Series; length = ``len(freq)``
The raw SRS peaks version of the first column in `amp`. See
`amp`. Index is `freq`.
var : pandas Series; length = ``len(freq)``
Vector of the SDOF response variances. Index is `freq`.
parallel : string
Either 'yes' or 'no' depending on whether parallel processing
was used or not.
ncpu : integer
Specifies the number of CPUs used.
resp : string
Same as the input `resp`.
Notes
-----
Steps (see [#fde1]_, [#fde2]_):
1. Resample signal to higher rate if highest frequency would
have less than `ppc` points-per-cycle. Method of increasing
the sample rate is controlled by the `rolloff` input.
2. For each frequency:
a. Compute the SDOF base-drive response
b. Calculate `srs` and `var` outputs
c. Use :func:`pyyeti.cyclecount.findap` to find cycle peaks
d. Use :func:`pyyeti.cyclecount.rainflow` to count cycles
and amplitudes
e. Put counts into amplitude bins
3. Calculate `g1` based on cycle amplitudes from maximum
amplitude (step 2d) and Mile's (or similar) equation.
4. Calculate `g2` to bound `g1` & lower amplitude cycles with
high counts. Ignore amplitudes < ``Amax/3``.
5. Calculate damage indicators from data with b = 4, 8, 12
where b is the fatigue exponent.
6. By equating the theoretical damage from a `T0` second random
vibration test to the damage from the input signal (step 5),
solve for the required test response variances for b = 4, 8,
12.
7. Solve for `g4`, `g8`, `g12` from the results of step 6 using
the Mile's equation (or similar); equations are shown below.
No checks are done regarding the suitability of this method for
the input data. It is recommended to read the references [#fde1]_
[#fde2]_ and do those checks (such as plotting Count or Time
vs. Amp**2 and comparing to theoretical).
The Mile's equation (or similar) is used in this methodology to
relate acceleration PSDs to peak responses. If `resp` is
'absacce', it is the Mile's equation:
.. math::
\sigma_{absacce}(f) = \sqrt{\frac{\pi}{2} \cdot f \cdot Q
\cdot PSD(f)}
If `resp` is 'pvelo', the similar equation is:
.. math::
\sigma_{pvelo}(f) = \sqrt{\frac{Q \cdot PSD(f)}{8 \pi f}}
Those two equations assume a flat acceleration PSD. Therefore, it
is recommended to compare SDOF responses from flight data (SRS) to
SDOF VRS responses from the developed specification (see
:func:`pyyeti.srs.vrs` to compute the VRS response in the
absolute-acceleration case). This is to check for conservatism.
Instead of using 3 for peak factor (for 3-rms or 3-sigma), use
:math:`\sqrt{2 \ln(f \cdot T_0)}` for the peak factor (derived
below). Also, enveloping multiple specifications from multiple Q's
is worth considering.
Note that this analysis can be time consuming; the time is
proportional to the number of frequencies multiplied by the number
of time steps in the signal.
The derivation of the peak factor is as follows. For the special
case of narrow band noise where the instantaneous amplitudes
follow the Gaussian distribution, the resulting probability
density function for the peak amplitudes follow the Rayleigh
distribution [#fde3]_. The single DOF response to Gaussian input
is reasonably estimated as Gaussian narrow band. Let this response
have the standard deviation :math:`\sigma`. From the Rayleigh
distribution, the probability of a peak being greater than
:math:`A` is:
.. math::
Prob[peak > A] = e ^ {\frac{-A^2}{2 \sigma^2}}
To estimate the maximum peak for the response of a single DOF
system with frequency :math:`f`, find the amplitude that would be
expected to occur once within the allotted time
(:math:`T_0`). That is, set the product of the probability of a
cycle amplitude being greater than :math:`A` and the number of
cycles equal to 1.0, and then solve for :math:`A`.
The number of cycles of :math:`f` Hz is :math:`N = f \cdot T_0`.
Therefore:
.. math::
\begin{aligned}
Prob[peak > A] \cdot N &= 1.0
e ^ {\frac{-A^2}{2 \sigma^2}} f \cdot T_0 &= 1.0
\frac{-A^2}{2 \sigma^2} &= \ln(1.0) - \ln(f \cdot T_0)
\frac{A^2}{2 \sigma^2} &= \ln(f \cdot T_0)
A &= \sqrt{2 \ln(f \cdot T_0)} \sigma
\end{aligned}
.. note::
In addition to the example shown below, this routine is
demonstrated in the pyYeti :ref:`tutorial`:
:doc:`/tutorials/fatigue`. There is also a link to the source
Jupyter notebook at the top of the tutorial.
References
----------
.. [#fde1] "Analysis of Nonstationary Vibroacoustic Flight Data
Using a Damage-Potential Basis"; <NAME>, <NAME>,
<NAME>; Journal of Spacecraft and Rockets, Vol 40, No. 5,
September-October 2003.
.. [#fde2] "Implementing the Fatigue Damage Spectrum and Fatigue
Damage Equivalent Vibration Testing"; <NAME>; 79th
Shock and Vibration Symposium, October 26 – 30, 2008.
.. [#fde3] Bendat, <NAME>., "Probability Functions for Random
Responses: Prediction of Peaks, Fatigue Damage, and
Catastrophic Failures", NASA Contractor Report 33 (NASA
CR-33), 1964.
See also
--------
:func:`scipy.signal.welch`, :func:`pyyeti.psd.psdmod`,
:func:`pyyeti.cyclecount.rainflow`, :func:`pyyeti.srs.srs`.
Examples
--------
Generate 60 second random signal to a pre-defined spec level,
compute the PSD several different ways and compare. Since it's 60
seconds, the damage-based PSDs should be fairly close to the input
spec level. The damage-based PSDs will be calculated with several
Qs and enveloped.
In this example, G2 envelopes G1, G4, G8, G12. This is not always
the case. For example, try TF=120; the damage-based curves go up
in order to fit equal damage in 60s.
One Count vs. Amp**2 plot is done for illustration.
.. plot::
:context: close-figs
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from pyyeti import psd, fdepsd
>>> import scipy.signal as signal
>>>
>>> TF = 60 # make a 60 second signal
>>> spec = np.array([[20, 1], [50, 1]])
>>> sig, sr, t = psd.psd2time(
... spec, ppc=10, fstart=20, fstop=50, df=1 / TF,
... winends=dict(portion=10), gettime=True)
>>>
>>> fig = plt.figure('Example', figsize=[9, 6])
>>> fig.clf()
>>> _ = plt.subplot(211)
>>> _ = plt.plot(t, sig)
>>> _ = plt.title(r'Input Signal - Specification Level = '
... '1.0 $g^{2}$/Hz')
>>> _ = plt.xlabel('Time (sec)')
>>> _ = plt.ylabel('Acceleration (g)')
>>> ax = plt.subplot(212)
>>> f, p = signal.welch(sig, sr, nperseg=sr)
>>> f2, p2 = psd.psdmod(sig, sr, nperseg=sr, timeslice=4,
... tsoverlap=0.5)
Calculate G1, G2, and the damage potential PSDs:
>>> psd_ = 0
>>> freq = np.arange(20., 50.1)
>>> for q in (10, 25, 50):
... fde = fdepsd.fdepsd(sig, sr, freq, q)
... psd_ = np.fmax(psd_, fde.psd)
>>> #
>>> _ = plt.plot(*spec.T, 'k--', lw=2.5, label='Spec')
>>> _ = plt.plot(f, p, label='Welch PSD')
>>> _ = plt.plot(f2, p2, label='PSDmod')
>>>
>>> # For plot, rename columns in DataFrame to include "Env":
>>> psd_ = (psd_
... .rename(columns={i: i + ' Env'
... for i in psd_.columns}))
>>> _ = psd_.plot.line(ax=ax)
>>> _ = plt.xlim(20, 50)
>>> _ = plt.title('PSD Comparison')
>>> _ = plt.xlabel('Freq (Hz)')
>>> _ = plt.ylabel(r'PSD ($g^{2}$/Hz)')
>>> _ = plt.legend(loc='upper left',
... bbox_to_anchor=(1.02, 1.),
... borderaxespad=0.)
>>> plt.tight_layout()
>>> fig.subplots_adjust(right=0.78)
.. plot::
:context: close-figs
Compare to theoretical bin counts @ 30 Hz:
>>> _ = plt.figure('Example 2')
>>> plt.clf()
>>> Frq = freq[np.searchsorted(freq, 30)]
>>> _ = plt.semilogy(fde.binamps.loc[Frq]**2,
... fde.count.loc[Frq],
... label='Data')
>>> # use flight time here (TF), not test time (T0)
>>> Amax2 = 2 * fde.var.loc[Frq] * np.log(Frq * TF)
>>> _ = plt.plot([0, Amax2], [Frq * TF, 1], label='Theory')
>>> y1 = fde.count.loc[Frq, 0]
>>> peakamp = fde.peakamp.loc[Frq]
>>> for j, lbl in enumerate(fde.peakamp.columns):
... _ = plt.plot([0, peakamp[j]**2], [y1, 1], label=lbl)
>>> _ = plt.title('Bin Count Check for Q=50, Freq=30 Hz')
>>> _ = plt.xlabel(r'$Amp^2$')
>>> _ = plt.ylabel('Count')
>>> _ = plt.legend(loc='best')
"""
sig, freq = np.atleast_1d(sig, freq)
if sig.ndim > 1 or freq.ndim > 1:
raise ValueError("`sig` and `freq` must both be 1d arrays")
if resp not in ("absacce", "pvelo"):
raise ValueError("`resp` must be 'absacce' or 'pvelo'")
(coeffunc, methfunc, rollfunc, ptr) = srs._process_inputs(
resp, "abs", rolloff, "primary"
)
if hpfilter is not None:
if verbose:
print(f"High pass filtering @ {hpfilter} Hz")
b, a = signal.butter(3, hpfilter / (sr / 2), "high")
# to try to get rid of filter transient at the beginning:
# - put a 0.25 second buffer on the front (length from
# looking at impulse response)
# - filter
# - chop off buffer
n = int(0.25 * sr)
sig2 = np.empty(n + sig.size)
sig2[:n] = sig[0]
sig2[n:] = sig
sig = signal.lfilter(b, a, sig2)[n:]
if winends == "auto":
sig = dsp.windowends(sig, min(int(0.25 * sr), 50))
elif winends is not None:
sig = dsp.windowends(sig, **winends)
mxfrq = freq.max()
curppc = sr / mxfrq
if rolloff == "prefilter":
sig, sr = rollfunc(sig, sr, ppc, mxfrq)
rollfunc = None
if curppc < ppc and rollfunc:
if verbose:
print(
f"Using {rolloff} method to increase sample rate (have "
f"only {curppc} pts/cycle @ {mxfrq} Hz"
)
sig, sr = rollfunc(sig, sr, ppc, mxfrq)
ppc = sr / mxfrq
if verbose:
print(f"After interpolation, have {ppc} pts/cycle @ {mxfrq} Hz\n")
LF = freq.size
dT = 1 / sr
pi = np.pi
Wn = 2 * pi * freq
parallel, ncpu = srs._process_parallel(
parallel, LF, sig.size, maxcpu, getresp=False
)
# allocate RAM:
if parallel == "yes":
# global shared vars will be: WN, SIG, ASV, BinAmps, Count
WN = (srs.copyToSharedArray(Wn), Wn.shape)
SIG = (srs.copyToSharedArray(sig), sig.shape)
ASV = (srs.createSharedArray((3, LF)), (3, LF))
BinAmps = (srs.createSharedArray((LF, nbins)), (LF, nbins))
a = _to_np_array(BinAmps)
a += np.arange(nbins, dtype=float) / nbins
Count = (srs.createSharedArray((LF, nbins)), (LF, nbins))
args = (coeffunc, Q, dT, verbose)
gvars = (WN, SIG, ASV, BinAmps, Count)
func = _dofde
with mp.Pool(
processes=ncpu, initializer=_mk_par_globals, initargs=gvars
) as pool:
for _ in pool.imap_unordered(func, zip(range(LF), it.repeat(args, LF))):
pass
ASV = _to_np_array(ASV)
Amax = ASV[0]
SRSmax = ASV[1]
Var = ASV[2]
Count = _to_np_array(Count)
BinAmps = a
else:
Amax = np.zeros(LF)
SRSmax = np.zeros(LF)
Var = np.zeros(LF)
BinAmps = np.zeros((LF, nbins))
BinAmps += np.arange(nbins, dtype=float) / nbins
Count = np.zeros((LF, nbins))
# loop over frequencies, calculating responses & counting
# cycles
for j, wn in enumerate(Wn):
if verbose:
print(f"Processing frequency {wn / 2 / pi:8.2f} Hz", end="\r")
b, a = coeffunc(Q, dT, wn)
resphist = signal.lfilter(b, a, sig)
SRSmax[j] = abs(resphist).max()
Var[j] = np.var(resphist, ddof=1)
# use rainflow to count cycles:
ind = cyclecount.findap(resphist)
rf = cyclecount.rainflow(resphist[ind])
amp = rf["amp"]
count = rf["count"]
Amax[j] = amp.max()
BinAmps[j] *= Amax[j]
# cumulative bin count:
for jj in range(nbins):
pv = amp >= BinAmps[j, jj]
Count[j, jj] = np.sum(count[pv])
if verbose:
print()
print("Computing outputs G1, G2, etc.")
# calculate non-cumulative counts per bin:
BinCount = np.hstack((Count[:, :-1] - Count[:, 1:], Count[:, -1:]))
# for calculating G2:
G2max = Amax ** 2
for j in range(LF):
pv = BinAmps[j] >= Amax[j] / 3 # ignore small amp cycles
if np.any(pv):
x = BinAmps[j, pv] ** 2
x2 = G2max[j]
y = np.log(Count[j, pv])
y1 = np.log(Count[j, 0])
g1y = np.interp(x, [0, x2], [y1, 0])
tantheta = (y - g1y) / x
k = np.argmax(tantheta)
if tantheta[k] > 0:
# g2 line is higher than g1 line, so find BinAmps**2
# where log(count) = 0; ie, solve for x-intercept in
# y = m x + b; (x, y) pts are: (0, y1), (x[k], y[k]):
G2max[j] = x[k] * y1 / (y1 - y[k])
# calculate flight-damage indicators for b = 4, 8 and 12:
b4 = 4
b8 = 8
b12 = 12
Df4 = np.zeros(LF)
Df8 = np.zeros(LF)
Df12 = np.zeros(LF)
for j in range(LF):
Df4[j] = (BinAmps[j] ** b4).dot(BinCount[j])
Df8[j] = (BinAmps[j] ** b8).dot(BinCount[j])
Df12[j] = (BinAmps[j] ** b12).dot(BinCount[j])
N0 = freq * T0
lnN0 = np.log(N0)
if resp == "absacce":
G1 = Amax ** 2 / (Q * pi * freq * lnN0)
G2 = G2max / (Q * pi * freq * lnN0)
# calculate test-damage indicators for b = 4, 8 and 12:
Abar = 2 * lnN0
Abar2 = Abar ** 2
Dt4 = N0 * 8 - (Abar2 + 4 * Abar + 8)
sig2_4 = np.sqrt(Df4 / Dt4)
G4 = sig2_4 / ((Q * pi / 2) * freq)
Abar3 = Abar2 * Abar
Abar4 = Abar2 * Abar2
Dt8 = N0 * 384 - (Abar4 + 8 * Abar3 + 48 * Abar2 + 192 * Abar + 384)
sig2_8 = (Df8 / Dt8) ** (1 / 4)
G8 = sig2_8 / ((Q * pi / 2) * freq)
Abar5 = Abar4 * Abar
Abar6 = Abar4 * Abar2
Dt12 = N0 * 46080 - (
Abar6
+ 12 * Abar5
+ 120 * Abar4
+ 960 * Abar3
+ 5760 * Abar2
+ 23040 * Abar
+ 46080
)
sig2_12 = (Df12 / Dt12) ** (1 / 6)
G12 = sig2_12 / ((Q * pi / 2) * freq)
Gmax = np.sqrt(np.vstack((G4, G8, G12)) * (Q * pi * freq * lnN0))
else:
G1 = (Amax ** 2 * 4 * pi * freq) / (Q * lnN0)
G2 = (G2max * 4 * pi * freq) / (Q * lnN0)
Dt4 = 2 * N0
sig2_4 = np.sqrt(Df4 / Dt4)
G4 = sig2_4 * ((4 * pi / Q) * freq)
Dt8 = 24 * N0
sig2_8 = (Df8 / Dt8) ** (1 / 4)
G8 = sig2_8 * ((4 * pi / Q) * freq)
Dt12 = 720 * N0
sig2_12 = (Df12 / Dt12) ** (1 / 6)
G12 = sig2_12 * ((4 * pi / Q) * freq)
Gmax = np.sqrt(np.vstack((G4, G8, G12)) * (Q * lnN0) / (4 * pi * freq))
# for output, scale the damage indicators:
Dt4 *= 4 # 2 ** (b/2)
Dt8 *= 16
Dt12 *= 64
# assemble outputs:
columns = ["G1", "G2", "G4", "G8", "G12"]
lcls = locals()
dct = {k: lcls[k] for k in columns}
Gpsd = pd.DataFrame(dct, columns=columns, index=freq)
Gpsd.index.name = "Frequency"
index = Gpsd.index
G2max = np.sqrt(G2max)
Gmax = pd.DataFrame(np.vstack((Amax, G2max, Gmax)).T, columns=columns, index=index)
BinAmps = pd.DataFrame(BinAmps, index=index)
Count = pd.DataFrame(Count, index=index)
BinCount = pd.DataFrame(BinCount, index=index)
Var = pd.Series(Var, index=index)
SRSmax = pd.Series(SRSmax, index=index)
import datetime
import logging
import json
import requests
from pandas import json_normalize
import pandas as pd
from google.cloud import storage
from anyway.parsers.waze.waze_db_functions import (
insert_waze_alerts,
insert_waze_traffic_jams,
enrich_waze_alerts_ended_at_timestamp,
enrich_waze_traffic_jams_ended_at_timestamp,
)
from anyway.models import WazeAlert, WazeTrafficJams
ISRAEL_POLYGON = [
("33.717000", "32.547000"),
("34.722000", "33.004000"),
("35.793000", "33.331000"),
("35.914000", "32.953000"),
("35.750000", "32.723000"),
("35.395000", "31.084000"),
("34.931000", "29.473000"),
("33.717000", "32.547000"),
("33.717000", "32.547000"),
]
WAZE_ALERTS_API_PARAMS = {
"format": "JSON",
"tk": "ccp_partner",
"ccp_partner_name": "The Public Knowledge Workshop",
"types": "traffic,alerts,irregularities",
"polygon": ";".join([",".join(point) for point in ISRAEL_POLYGON]),
}
WAZE_ALERTS_API_URL = "https://il-georss.waze.com/rtserver/web/TGeoRSS"
logger = logging.getLogger("waze_data")
def list_blobs(bucket_name):
"""
Lists all the blobs in the bucket.
"""
storage_client = storage.Client()
blobs = storage_client.list_blobs(bucket_name)
return blobs
def parse_waze_alerts_data(waze_alerts, back_filled=False):
"""
parse waze alert json into a Dataframe.
param waze_alerts: waze raw alert json data
return: parsed Dataframe
"""
waze_df = json_normalize(waze_alerts)
waze_df["created_at"] = | pd.to_datetime(waze_df["pubMillis"], unit="ms") | pandas.to_datetime |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat May 23 03:54:38 2020
@author: lukepinkel
"""
import numpy as np
import scipy as sp
import scipy.stats
import pandas as pd
from .linalg_operations import _check_shape
def get_param_table(params, se_params, degfree=None, index=None,
parameter_label=None, pdist=None, p_const=2.0):
if parameter_label is None:
parameter_label = 'parameter'
arr = np.vstack((_check_shape(params, 1), _check_shape(se_params, 1))).T
df = pd.DataFrame(arr, index=index, columns=[parameter_label, 'SE'])
from datetime import datetime
startTime = datetime.now()
import json
import glob
import os
import pandas as pd
import tensorflow as tf
import tensorflowjs as tfjs
from tensorflow import keras
from sklearn.model_selection import train_test_split
import requests
EPOCHS = 9
CLASSES = 2
"""
Build and return the Keras model ready to fit
"""
def build_classification_model(X_train):
model = keras.Sequential([
keras.layers.Dense(64, activation=tf.nn.sigmoid, input_shape=(X_train.shape[1],)),
keras.layers.Dense(64, activation=tf.nn.sigmoid),
keras.layers.Dense(64, activation=tf.nn.sigmoid),
keras.layers.Dense(CLASSES, activation=tf.nn.softmax)
])
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
return model
"""
get the percent ontime of a particular airline by id
"""
def get_airline_percent_ontime(id, airlines):
for obj in airlines:
if obj['airline_id'] == id:
return obj['airline_percent_ontime_arrival']
"""
get the percent ontime of a particular airport by id
"""
def get_airport_percent_ontime(id, airports):
for obj in airports:
if obj['airport_id'] == id:
return obj['airport_percent_ontime_departure']
"""
create the classes for classifying each departure or arrival time as
ontime or late
"""
def create_classes(y):
for i in range(len(y)):
if y[i] < 10:
y[i] = 0
else:
y[i] = 1
return y
"""
create the classes and split the data into training and testing
"""
def prepare_data(X, y):
y = y.tolist()
y = create_classes(y)
return train_test_split(X, y, test_size=0.2, random_state=42)
"""
Run the program to load data, create and fit the model, test, and save the model as JSON
"""
print('Getting airport and airline metadata from FlyGenius API...', end=' ')
r = requests.get('https://api.flygeni.us/airports/?use_details=True')
airports = r.json()
r = requests.get('https://api.flygeni.us/airlines/?use_details=True')
airlines = r.json()
print('done!\nLoading raw flight data from CSV files...', end=' ')
path = os.path.normpath(os.path.join(os.getcwd(), 'data/flight-data/*_2017_*/*.csv'))
all_data = glob.glob(path)
loaded_data = []
for path in all_data:
this_data = pd.read_csv(path, skipinitialspace=True, low_memory=False)
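# Plausible continuation, shown only as a hedged sketch (the loop body is
# truncated here): collect each per-file frame and combine them.
#
#     loaded_data.append(this_data)
# flight_data = pd.concat(loaded_data, ignore_index=True)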
# coding: utf-8
# In[3]:
import pandas as pd
train = pd.read_csv('train.csv')
test = pd.read_csv('test.csv')
# In[4]:
import matplotlib.pyplot as plt
import seaborn as sns
sns.set() # setting seaborn default for plots
# In[5]:
train_test_data = [train, test] # combining train and test dataset
for dataset in train_test_data:
dataset['Title'] = dataset['Name'].str.extract(' ([A-Za-z]+)\.', expand=False)
# In[6]:
title_mapping = {"Mr": 0, "Miss": 1, "Mrs": 2,
"Master": 3, "Dr": 3, "Rev": 3, "Col": 3, "Major": 3, "Mlle": 3,"Countess": 3,
"Ms": 3, "Lady": 3, "Jonkheer": 3, "Don": 3, "Dona" : 3, "Mme": 3,"Capt": 3,"Sir": 3 }
for dataset in train_test_data:
dataset['Title'] = dataset['Title'].map(title_mapping)
# In[7]:
# delete unnecessary feature from dataset
train.drop('Name', axis=1, inplace=True)
test.drop('Name', axis=1, inplace=True)
# In[8]:
sex_mapping = {"male": 0, "female": 1}
for dataset in train_test_data:
dataset['Sex'] = dataset['Sex'].map(sex_mapping)
# In[9]:
train["Age"].fillna(train.groupby("Title")["Age"].transform("median"), inplace=True)
test["Age"].fillna(test.groupby("Title")["Age"].transform("median"), inplace=True)
# In[10]:
for dataset in train_test_data:
dataset.loc[ dataset['Age'] <= 16, 'Age'] = 0,
dataset.loc[(dataset['Age'] > 16) & (dataset['Age'] <= 26), 'Age'] = 1,
dataset.loc[(dataset['Age'] > 26) & (dataset['Age'] <= 36), 'Age'] = 2,
dataset.loc[(dataset['Age'] > 36) & (dataset['Age'] <= 62), 'Age'] = 3,
dataset.loc[ dataset['Age'] > 62, 'Age'] = 4
# In[11]:
Pclass1 = train[train['Pclass']==1]['Embarked'].value_counts()
Pclass2 = train[train['Pclass']==2]['Embarked'].value_counts()
Pclass3 = train[train['Pclass']==3]['Embarked'].value_counts()
df = pd.DataFrame([Pclass1, Pclass2, Pclass3])
df.index = ['1st class','2nd class', '3rd class']
# In[12]:
for dataset in train_test_data:
dataset['Embarked'] = dataset['Embarked'].fillna('S')
# In[13]:
embarked_mapping = {"S": 0, "C": 1, "Q": 2}
for dataset in train_test_data:
dataset['Embarked'] = dataset['Embarked'].map(embarked_mapping)
# In[14]:
# fill missing Fare with median fare for each Pclass
train["Fare"].fillna(train.groupby("Pclass")["Fare"].transform("median"), inplace=True)
test["Fare"].fillna(test.groupby("Pclass")["Fare"].transform("median"), inplace=True)
# In[15]:
for dataset in train_test_data:
dataset.loc[ dataset['Fare'] <= 17, 'Fare'] = 0,
dataset.loc[(dataset['Fare'] > 17) & (dataset['Fare'] <= 30), 'Fare'] = 1,
dataset.loc[(dataset['Fare'] > 30) & (dataset['Fare'] <= 100), 'Fare'] = 2,
dataset.loc[ dataset['Fare'] > 100, 'Fare'] = 3
# In[16]:
for dataset in train_test_data:
dataset['Cabin'] = dataset['Cabin'].str[:1]
# In[17]:
Pclass1 = train[train['Pclass']==1]['Cabin'].value_counts()
Pclass2 = train[train['Pclass']==2]['Cabin'].value_counts()
Pclass3 = train[train['Pclass']==3]['Cabin'].value_counts()
df = pd.DataFrame([Pclass1, Pclass2, Pclass3])
from bokeh.sampledata.us_states import data as stateBorders
from bokeh.sampledata.us_counties import data as counties
from COVID.extract import COVID_counts
import pandas as pd
import numpy as np
import pickle
# stateBorders['D.C.'] = stateBorders.pop('DC')
stateBorders = pd.DataFrame(stateBorders)
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 4 22:47:56 2020
@author: nipunn
"""
def outlying_rows(filename):
mydataset = pd.read_csv(filename)
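# The function body is truncated here; a purely illustrative sketch of one way
# to flag outlying rows (an assumption, not the author's implementation):
#
#     numeric = mydataset.select_dtypes('number')
#     z = (numeric - numeric.mean()) / numeric.std(ddof=0)
#     return mydataset[(z.abs() > 3).any(axis=1)]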
import IPython
import base64
import cv2
import json
import numpy as np
import pandas as pd
import pravega.grpc_gateway as pravega
from matplotlib import pyplot as plt
import time
def ignore_non_events(read_events):
for read_event in read_events:
if len(read_event.event) > 0:
yield read_event
def opencv_image_to_mpl(img):
return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
class StreamBase():
def __init__(self, pravega_client, scope, stream, create=False):
self.pravega_client = pravega_client
self.scope = scope
self.stream = stream
if create:
self.create_stream()
def create_stream(self, min_num_segments=1):
return self.pravega_client.CreateStream(pravega.pb.CreateStreamRequest(
scope=self.scope,
stream=self.stream,
scaling_policy=pravega.pb.ScalingPolicy(min_num_segments=min_num_segments),
))
def get_stream_info(self):
return self.pravega_client.GetStreamInfo(pravega.pb.GetStreamInfoRequest(
scope=self.scope,
stream=self.stream,
))
class OutputStream(StreamBase):
def __init__(self, pravega_client, scope, stream, create=True):
super(OutputStream, self).__init__(pravega_client, scope, stream, create)
def write_video_from_file(self, filename, crop=None):
cap = cv2.VideoCapture(filename)
video_frames = self.opencv_video_frame_generator(cap)
cropped_video_frames = (self.cropped_video_frame(f, crop) for f in video_frames)
events_to_write = self.video_frame_write_generator(cropped_video_frames)
write_response = self.pravega_client.WriteEvents(events_to_write)
return write_response
def opencv_video_frame_generator(self, vidcap):
while True:
pos_frames = vidcap.get(cv2.CAP_PROP_POS_FRAMES)
success, image = vidcap.read()
if not success:
return
video_frame = dict(
image=image,
frameNumber=int(pos_frames),
timestamp=int(time.time() * 1000),
)
yield video_frame
def cropped_video_frame(self, video_frame, crop):
if crop:
left, top, right, bottom = crop
video_frame['image'] = video_frame['image'][top:bottom, left:right]
return video_frame
def video_frame_write_generator(self, video_frame_iter, camera=0):
for video_frame in video_frame_iter:
event_dict = video_frame.copy()
event_dict['camera'] = camera
event_dict['ssrc'] = 0
success, png_array = cv2.imencode('.png', video_frame['image'])
event_dict['data'] = base64.b64encode(png_array.tobytes()).decode(encoding='UTF-8')
del event_dict['image']
to_log_dict = event_dict.copy()
to_log_dict['data'] = '(%d bytes)' % len(event_dict['data'])
# print('video_frame_write_generator: ' + json.dumps(to_log_dict))
event_json = json.dumps(event_dict)
event_bytes = event_json.encode(encoding='UTF-8')
event_to_write = pravega.pb.WriteEventsRequest(
scope=self.scope,
stream=self.stream,
event=event_bytes,
routing_key=str(camera),
)
yield event_to_write
class UnindexedStream(StreamBase):
def __init__(self, pravega_client, scope, stream):
super(UnindexedStream, self).__init__(pravega_client, scope, stream)
def read_events(self, from_stream_cut=None, to_stream_cut=None):
read_events_request = pravega.pb.ReadEventsRequest(
scope=self.scope,
stream=self.stream,
from_stream_cut=from_stream_cut,
to_stream_cut=to_stream_cut,
)
return ignore_non_events(self.pravega_client.ReadEvents(read_events_request))
def read_event_to_video_frame(self, read_event):
event_json = read_event.event
video_frame = json.loads(event_json)
image_png = base64.b64decode(video_frame['data'])
del video_frame['data']
image_png_array = np.frombuffer(image_png, dtype=np.uint8)
image_array = cv2.imdecode(image_png_array, cv2.IMREAD_UNCHANGED)
video_frame['image_array'] = image_array
video_frame['timestamp'] = pd.to_datetime(video_frame['timestamp'], unit='ms', utc=True)
return video_frame
def read_video_frames(self, from_stream_cut=None, to_stream_cut=None):
read_events = self.read_events(from_stream_cut, to_stream_cut)
return (self.read_event_to_video_frame(read_event) for read_event in read_events)
def play_video(self, from_stream_cut=None, to_stream_cut=None, show_frame_interval=1):
read_events = self.read_video_frames(from_stream_cut, to_stream_cut)
for i, video_frame in enumerate(read_events):
if i % show_frame_interval == 0:
IPython.display.clear_output(wait=True)
plt.title('frameNumber=%d, timestamp=%s' % (video_frame['frameNumber'], video_frame['timestamp']))
plt.imshow(opencv_image_to_mpl(video_frame['image_array']));
plt.show()
class IndexedStream():
def __init__(self, pravega_client, scope, stream, from_stream_cut=None, timestamp_col='timestamp'):
self.pravega_client = pravega_client
self.scope = scope
self.stream = stream
self.from_stream_cut = from_stream_cut
self.timestamp_col = timestamp_col
self.index_df = None
def build_index(self):
stream_info = self.pravega_client.GetStreamInfo(
pravega.pb.GetStreamInfoRequest(scope=self.scope, stream=self.stream))
# print('stream_info=%s' % str(stream_info))
from_stream_cut = stream_info.head_stream_cut if self.from_stream_cut is None else self.from_stream_cut
to_stream_cut = stream_info.tail_stream_cut
read_events_request = pravega.pb.ReadEventsRequest(
scope=self.scope,
stream=self.stream,
from_stream_cut=from_stream_cut,
to_stream_cut=to_stream_cut,
)
# print(read_events_request)
read_events = ignore_non_events(self.pravega_client.ReadEvents(read_events_request))
index_list = [self.read_event_to_index(read_event) for read_event in read_events]
df = pd.DataFrame(index_list)
df[self.timestamp_col] = pd.to_datetime(df[self.timestamp_col], unit='ms', utc=True)
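# Hedged sketch of a likely next step (the method is truncated here): keep the
# frame, indexed and sorted by timestamp, for later range lookups.
#
#     self.index_df = df.set_index(self.timestamp_col).sort_index()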
import csv
from io import StringIO
import os
import numpy as np
import pytest
from pandas.errors import ParserError
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
NaT,
Series,
Timestamp,
date_range,
read_csv,
to_datetime,
)
import pandas._testing as tm
import pandas.core.common as com
from pandas.io.common import get_handle
MIXED_FLOAT_DTYPES = ["float16", "float32", "float64"]
MIXED_INT_DTYPES = [
"uint8",
"uint16",
"uint32",
"uint64",
"int8",
"int16",
"int32",
"int64",
]
class TestDataFrameToCSV:
def read_csv(self, path, **kwargs):
params = {"index_col": 0, "parse_dates": True}
params.update(**kwargs)
return read_csv(path, **params)
def test_to_csv_from_csv1(self, float_frame, datetime_frame):
with tm.ensure_clean("__tmp_to_csv_from_csv1__") as path:
float_frame["A"][:5] = np.nan
float_frame.to_csv(path)
float_frame.to_csv(path, columns=["A", "B"])
float_frame.to_csv(path, header=False)
float_frame.to_csv(path, index=False)
# test roundtrip
# freq does not roundtrip
datetime_frame.index = datetime_frame.index._with_freq(None)
datetime_frame.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(datetime_frame, recons)
datetime_frame.to_csv(path, index_label="index")
recons = self.read_csv(path, index_col=None)
assert len(recons.columns) == len(datetime_frame.columns) + 1
# no index
datetime_frame.to_csv(path, index=False)
recons = self.read_csv(path, index_col=None)
tm.assert_almost_equal(datetime_frame.values, recons.values)
# corner case
dm = DataFrame(
{
"s1": Series(range(3), index=np.arange(3)),
"s2": Series(range(2), index=np.arange(2)),
}
)
dm.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(dm, recons)
def test_to_csv_from_csv2(self, float_frame):
with tm.ensure_clean("__tmp_to_csv_from_csv2__") as path:
# duplicate index
df = DataFrame(
np.random.randn(3, 3), index=["a", "a", "b"], columns=["x", "y", "z"]
)
df.to_csv(path)
result = self.read_csv(path)
tm.assert_frame_equal(result, df)
midx = MultiIndex.from_tuples([("A", 1, 2), ("A", 1, 2), ("B", 1, 2)])
df = DataFrame(np.random.randn(3, 3), index=midx, columns=["x", "y", "z"])
df.to_csv(path)
result = self.read_csv(path, index_col=[0, 1, 2], parse_dates=False)
tm.assert_frame_equal(result, df, check_names=False)
# column aliases
col_aliases = Index(["AA", "X", "Y", "Z"])
float_frame.to_csv(path, header=col_aliases)
rs = self.read_csv(path)
xp = float_frame.copy()
xp.columns = col_aliases
tm.assert_frame_equal(xp, rs)
msg = "Writing 4 cols but got 2 aliases"
with pytest.raises(ValueError, match=msg):
float_frame.to_csv(path, header=["AA", "X"])
def test_to_csv_from_csv3(self):
with tm.ensure_clean("__tmp_to_csv_from_csv3__") as path:
df1 = DataFrame(np.random.randn(3, 1))
df2 = DataFrame(np.random.randn(3, 1))
df1.to_csv(path)
df2.to_csv(path, mode="a", header=False)
xp = pd.concat([df1, df2])
rs = read_csv(path, index_col=0)
rs.columns = [int(label) for label in rs.columns]
xp.columns = [int(label) for label in xp.columns]
tm.assert_frame_equal(xp, rs)
def test_to_csv_from_csv4(self):
with tm.ensure_clean("__tmp_to_csv_from_csv4__") as path:
# GH 10833 (TimedeltaIndex formatting)
dt = pd.Timedelta(seconds=1)
df = DataFrame(
{"dt_data": [i * dt for i in range(3)]},
index=Index([i * dt for i in range(3)], name="dt_index"),
)
df.to_csv(path)
result = read_csv(path, index_col="dt_index")
result.index = pd.to_timedelta(result.index)
result["dt_data"] = pd.to_timedelta(result["dt_data"])
tm.assert_frame_equal(df, result, check_index_type=True)
def test_to_csv_from_csv5(self, timezone_frame):
# tz, 8260
with tm.ensure_clean("__tmp_to_csv_from_csv5__") as path:
timezone_frame.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=["A"])
converter = (
lambda c: to_datetime(result[c])
.dt.tz_convert("UTC")
.dt.tz_convert(timezone_frame[c].dt.tz)
)
result["B"] = converter("B")
result["C"] = converter("C")
tm.assert_frame_equal(result, timezone_frame)
def test_to_csv_cols_reordering(self):
# GH3454
chunksize = 5
N = int(chunksize * 2.5)
df = tm.makeCustomDataframe(N, 3)
cs = df.columns
cols = [cs[2], cs[0]]
with tm.ensure_clean() as path:
df.to_csv(path, columns=cols, chunksize=chunksize)
rs_c = read_csv(path, index_col=0)
tm.assert_frame_equal(df[cols], rs_c, check_names=False)
def test_to_csv_new_dupe_cols(self):
def _check_df(df, cols=None):
with tm.ensure_clean() as path:
df.to_csv(path, columns=cols, chunksize=chunksize)
rs_c = read_csv(path, index_col=0)
# we wrote them in a different order
# so compare them in that order
if cols is not None:
if df.columns.is_unique:
rs_c.columns = cols
else:
indexer, missing = df.columns.get_indexer_non_unique(cols)
rs_c.columns = df.columns.take(indexer)
for c in cols:
obj_df = df[c]
obj_rs = rs_c[c]
if isinstance(obj_df, Series):
tm.assert_series_equal(obj_df, obj_rs)
else:
tm.assert_frame_equal(obj_df, obj_rs, check_names=False)
# wrote in the same order
else:
rs_c.columns = df.columns
tm.assert_frame_equal(df, rs_c, check_names=False)
chunksize = 5
N = int(chunksize * 2.5)
# dupe cols
df = tm.makeCustomDataframe(N, 3)
df.columns = ["a", "a", "b"]
_check_df(df, None)
# dupe cols with selection
cols = ["b", "a"]
_check_df(df, cols)
@pytest.mark.slow
def test_to_csv_dtnat(self):
# GH3437
def make_dtnat_arr(n, nnat=None):
if nnat is None:
nnat = int(n * 0.1) # 10%
s = list(date_range("2000", freq="5min", periods=n))
if nnat:
for i in np.random.randint(0, len(s), nnat):
s[i] = NaT
i = np.random.randint(100)
s[-i] = NaT
s[i] = NaT
return s
chunksize = 1000
# N=35000
s1 = make_dtnat_arr(chunksize + 5)
s2 = make_dtnat_arr(chunksize + 5, 0)
# s3=make_dtnat_arr(chunksize+5,0)
with tm.ensure_clean("1.csv") as pth:
df = DataFrame({"a": s1, "b": s2})
df.to_csv(pth, chunksize=chunksize)
recons = self.read_csv(pth).apply(to_datetime)
tm.assert_frame_equal(df, recons, check_names=False)
@pytest.mark.slow
def test_to_csv_moar(self):
def _do_test(
df, r_dtype=None, c_dtype=None, rnlvl=None, cnlvl=None, dupe_col=False
):
kwargs = {"parse_dates": False}
if cnlvl:
if rnlvl is not None:
kwargs["index_col"] = list(range(rnlvl))
kwargs["header"] = list(range(cnlvl))
with tm.ensure_clean("__tmp_to_csv_moar__") as path:
df.to_csv(path, encoding="utf8", chunksize=chunksize)
recons = self.read_csv(path, **kwargs)
else:
kwargs["header"] = 0
with tm.ensure_clean("__tmp_to_csv_moar__") as path:
df.to_csv(path, encoding="utf8", chunksize=chunksize)
recons = self.read_csv(path, **kwargs)
def _to_uni(x):
if not isinstance(x, str):
return x.decode("utf8")
return x
if dupe_col:
# read_csv disambiguates the columns by
# labeling them dupe.1, dupe.2, etc. Monkey-patch the columns
recons.columns = df.columns
if rnlvl and not cnlvl:
delta_lvl = [recons.iloc[:, i].values for i in range(rnlvl - 1)]
ix = MultiIndex.from_arrays([list(recons.index)] + delta_lvl)
recons.index = ix
recons = recons.iloc[:, rnlvl - 1 :]
type_map = {"i": "i", "f": "f", "s": "O", "u": "O", "dt": "O", "p": "O"}
if r_dtype:
if r_dtype == "u": # unicode
r_dtype = "O"
recons.index = np.array(
[_to_uni(label) for label in recons.index], dtype=r_dtype
)
df.index = np.array(
[_to_uni(label) for label in df.index], dtype=r_dtype
)
elif r_dtype == "dt": # unicode
r_dtype = "O"
recons.index = np.array(
[Timestamp(label) for label in recons.index], dtype=r_dtype
)
df.index = np.array(
[Timestamp(label) for label in df.index], dtype=r_dtype
)
elif r_dtype == "p":
r_dtype = "O"
idx_list = to_datetime(recons.index)
recons.index = np.array(
[Timestamp(label) for label in idx_list], dtype=r_dtype
)
df.index = np.array(
list(map(Timestamp, df.index.to_timestamp())), dtype=r_dtype
)
else:
r_dtype = type_map.get(r_dtype)
recons.index = np.array(recons.index, dtype=r_dtype)
df.index = np.array(df.index, dtype=r_dtype)
if c_dtype:
if c_dtype == "u":
c_dtype = "O"
recons.columns = np.array(
[_to_uni(label) for label in recons.columns], dtype=c_dtype
)
df.columns = np.array(
[_to_uni(label) for label in df.columns], dtype=c_dtype
)
elif c_dtype == "dt":
c_dtype = "O"
recons.columns = np.array(
[Timestamp(label) for label in recons.columns], dtype=c_dtype
)
df.columns = np.array(
[Timestamp(label) for label in df.columns], dtype=c_dtype
)
elif c_dtype == "p":
c_dtype = "O"
col_list = to_datetime(recons.columns)
recons.columns = np.array(
[Timestamp(label) for label in col_list], dtype=c_dtype
)
col_list = df.columns.to_timestamp()
df.columns = np.array(
[Timestamp(label) for label in col_list], dtype=c_dtype
)
else:
c_dtype = type_map.get(c_dtype)
recons.columns = np.array(recons.columns, dtype=c_dtype)
df.columns = np.array(df.columns, dtype=c_dtype)
tm.assert_frame_equal(df, recons, check_names=False)
N = 100
chunksize = 1000
ncols = 4
base = chunksize // ncols
for nrows in [
2,
10,
N - 1,
N,
N + 1,
N + 2,
2 * N - 2,
2 * N - 1,
2 * N,
2 * N + 1,
2 * N + 2,
base - 1,
base,
base + 1,
]:
_do_test(
tm.makeCustomDataframe(nrows, ncols, r_idx_type="dt", c_idx_type="s"),
"dt",
"s",
)
for r_idx_type, c_idx_type in [("i", "i"), ("s", "s"), ("u", "dt"), ("p", "p")]:
for ncols in [1, 2, 3, 4]:
base = chunksize // ncols
for nrows in [
2,
10,
N - 1,
N,
N + 1,
N + 2,
2 * N - 2,
2 * N - 1,
2 * N,
2 * N + 1,
2 * N + 2,
base - 1,
base,
base + 1,
]:
_do_test(
tm.makeCustomDataframe(
nrows, ncols, r_idx_type=r_idx_type, c_idx_type=c_idx_type
),
r_idx_type,
c_idx_type,
)
for ncols in [1, 2, 3, 4]:
base = chunksize // ncols
for nrows in [
10,
N - 2,
N - 1,
N,
N + 1,
N + 2,
2 * N - 2,
2 * N - 1,
2 * N,
2 * N + 1,
2 * N + 2,
base - 1,
base,
base + 1,
]:
_do_test(tm.makeCustomDataframe(nrows, ncols))
for nrows in [10, N - 2, N - 1, N, N + 1, N + 2]:
df = tm.makeCustomDataframe(nrows, 3)
cols = list(df.columns)
cols[:2] = ["dupe", "dupe"]
cols[-2:] = ["dupe", "dupe"]
ix = list(df.index)
ix[:2] = ["rdupe", "rdupe"]
ix[-2:] = ["rdupe", "rdupe"]
df.index = ix
df.columns = cols
_do_test(df, dupe_col=True)
_do_test(DataFrame(index=np.arange(10)))
_do_test(
tm.makeCustomDataframe(chunksize // 2 + 1, 2, r_idx_nlevels=2), rnlvl=2
)
for ncols in [2, 3, 4]:
base = int(chunksize // ncols)
for nrows in [
10,
N - 2,
N - 1,
N,
N + 1,
N + 2,
2 * N - 2,
2 * N - 1,
2 * N,
2 * N + 1,
2 * N + 2,
base - 1,
base,
base + 1,
]:
_do_test(tm.makeCustomDataframe(nrows, ncols, r_idx_nlevels=2), rnlvl=2)
_do_test(tm.makeCustomDataframe(nrows, ncols, c_idx_nlevels=2), cnlvl=2)
_do_test(
tm.makeCustomDataframe(
nrows, ncols, r_idx_nlevels=2, c_idx_nlevels=2
),
rnlvl=2,
cnlvl=2,
)
def test_to_csv_from_csv_w_some_infs(self, float_frame):
# test roundtrip with inf, -inf, nan, as full columns and mix
float_frame["G"] = np.nan
f = lambda x: [np.inf, np.nan][np.random.rand() < 0.5]
float_frame["H"] = float_frame.index.map(f)
with tm.ensure_clean() as path:
float_frame.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(float_frame, recons)
tm.assert_frame_equal(np.isinf(float_frame), np.isinf(recons))
def test_to_csv_from_csv_w_all_infs(self, float_frame):
# test roundtrip with inf, -inf, nan, as full columns and mix
float_frame["E"] = np.inf
float_frame["F"] = -np.inf
with tm.ensure_clean() as path:
float_frame.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(float_frame, recons)
tm.assert_frame_equal(np.isinf(float_frame), np.isinf(recons))
def test_to_csv_no_index(self):
# GH 3624, after appending columns, to_csv fails
with tm.ensure_clean("__tmp_to_csv_no_index__") as path:
df = DataFrame({"c1": [1, 2, 3], "c2": [4, 5, 6]})
df.to_csv(path, index=False)
result = read_csv(path)
tm.assert_frame_equal(df, result)
df["c3"] = Series([7, 8, 9], dtype="int64")
df.to_csv(path, index=False)
result = read_csv(path)
tm.assert_frame_equal(df, result)
def test_to_csv_with_mix_columns(self):
# gh-11637: incorrect output when a mix of integer and string column
# names passed as columns parameter in to_csv
df = DataFrame({0: ["a", "b", "c"], 1: ["aa", "bb", "cc"]})
df["test"] = "txt"
assert df.to_csv() == df.to_csv(columns=[0, 1, "test"])
def test_to_csv_headers(self):
# GH6186, the presence or absence of `index` incorrectly
# causes to_csv to have different header semantics.
from_df = DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
to_df = DataFrame([[1, 2], [3, 4]], columns=["X", "Y"])
with tm.ensure_clean("__tmp_to_csv_headers__") as path:
from_df.to_csv(path, header=["X", "Y"])
recons = self.read_csv(path)
tm.assert_frame_equal(to_df, recons)
from_df.to_csv(path, index=False, header=["X", "Y"])
recons = self.read_csv(path)
return_value = recons.reset_index(inplace=True)
assert return_value is None
tm.assert_frame_equal(to_df, recons)
def test_to_csv_multiindex(self, float_frame, datetime_frame):
frame = float_frame
old_index = frame.index
arrays = np.arange(len(old_index) * 2).reshape(2, -1)
new_index = MultiIndex.from_arrays(arrays, names=["first", "second"])
frame.index = new_index
with tm.ensure_clean("__tmp_to_csv_multiindex__") as path:
frame.to_csv(path, header=False)
frame.to_csv(path, columns=["A", "B"])
# round trip
frame.to_csv(path)
df = self.read_csv(path, index_col=[0, 1], parse_dates=False)
# TODO to_csv drops column name
tm.assert_frame_equal(frame, df, check_names=False)
assert frame.index.names == df.index.names
# needed if setUp becomes a class method
float_frame.index = old_index
# try multiindex with dates
tsframe = datetime_frame
old_index = tsframe.index
new_index = [old_index, np.arange(len(old_index))]
tsframe.index = MultiIndex.from_arrays(new_index)
tsframe.to_csv(path, index_label=["time", "foo"])
recons = self.read_csv(path, index_col=[0, 1])
# TODO to_csv drops column name
tm.assert_frame_equal(tsframe, recons, check_names=False)
# do not load index
tsframe.to_csv(path)
recons = self.read_csv(path, index_col=None)
assert len(recons.columns) == len(tsframe.columns) + 2
# no index
tsframe.to_csv(path, index=False)
recons = self.read_csv(path, index_col=None)
tm.assert_almost_equal(recons.values, datetime_frame.values)
# needed if setUp becomes class method
datetime_frame.index = old_index
with tm.ensure_clean("__tmp_to_csv_multiindex__") as path:
# GH3571, GH1651, GH3141
def _make_frame(names=None):
if names is True:
names = ["first", "second"]
return DataFrame(
np.random.randint(0, 10, size=(3, 3)),
columns=MultiIndex.from_tuples(
[("bah", "foo"), ("bah", "bar"), ("ban", "baz")], names=names
),
dtype="int64",
)
# column & index are multi-index
df = tm.makeCustomDataframe(5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
df.to_csv(path)
result = read_csv(path, header=[0, 1, 2, 3], index_col=[0, 1])
tm.assert_frame_equal(df, result)
# column is mi
df = tm.makeCustomDataframe(5, 3, r_idx_nlevels=1, c_idx_nlevels=4)
df.to_csv(path)
result = read_csv(path, header=[0, 1, 2, 3], index_col=0)
tm.assert_frame_equal(df, result)
# dup column names?
df = tm.makeCustomDataframe(5, 3, r_idx_nlevels=3, c_idx_nlevels=4)
df.to_csv(path)
result = read_csv(path, header=[0, 1, 2, 3], index_col=[0, 1, 2])
tm.assert_frame_equal(df, result)
# writing with no index
df = _make_frame()
df.to_csv(path, index=False)
result = read_csv(path, header=[0, 1])
tm.assert_frame_equal(df, result)
# we lose the names here
df = _make_frame(True)
df.to_csv(path, index=False)
result = read_csv(path, header=[0, 1])
assert com.all_none(*result.columns.names)
result.columns.names = df.columns.names
tm.assert_frame_equal(df, result)
# whatsnew example
df = _make_frame()
df.to_csv(path)
result = read_csv(path, header=[0, 1], index_col=[0])
tm.assert_frame_equal(df, result)
df = _make_frame(True)
df.to_csv(path)
result = read_csv(path, header=[0, 1], index_col=[0])
tm.assert_frame_equal(df, result)
# invalid options
df = _make_frame(True)
df.to_csv(path)
for i in [6, 7]:
msg = f"len of {i}, but only 5 lines in file"
with pytest.raises(ParserError, match=msg):
read_csv(path, header=list(range(i)), index_col=0)
# write with cols
msg = "cannot specify cols with a MultiIndex"
with pytest.raises(TypeError, match=msg):
df.to_csv(path, columns=["foo", "bar"])
with tm.ensure_clean("__tmp_to_csv_multiindex__") as path:
# empty
tsframe[:0].to_csv(path)
recons = self.read_csv(path)
exp = tsframe[:0]
exp.index = []
tm.assert_index_equal(recons.columns, exp.columns)
assert len(recons) == 0
def test_to_csv_interval_index(self):
# GH 28210
df = DataFrame({"A": list("abc"), "B": range(3)}, index=pd.interval_range(0, 3))
with tm.ensure_clean("__tmp_to_csv_interval_index__.csv") as path:
df.to_csv(path)
result = self.read_csv(path, index_col=0)
# can't roundtrip intervalindex via read_csv so check string repr (GH 23595)
expected = df.copy()
expected.index = expected.index.astype(str)
tm.assert_frame_equal(result, expected)
def test_to_csv_float32_nanrep(self):
df = DataFrame(np.random.randn(1, 4).astype(np.float32))
df[1] = np.nan
with tm.ensure_clean("__tmp_to_csv_float32_nanrep__.csv") as path:
df.to_csv(path, na_rep=999)
with open(path) as f:
lines = f.readlines()
assert lines[1].split(",")[2] == "999"
def test_to_csv_withcommas(self):
# Commas inside fields should be correctly escaped when saving as CSV.
df = DataFrame({"A": [1, 2, 3], "B": ["5,6", "7,8", "9,0"]})
with tm.ensure_clean("__tmp_to_csv_withcommas__.csv") as path:
df.to_csv(path)
df2 = self.read_csv(path)
tm.assert_frame_equal(df2, df)
def test_to_csv_mixed(self):
def create_cols(name):
return [f"{name}{i:03d}" for i in range(5)]
df_float = DataFrame(
np.random.randn(100, 5), dtype="float64", columns=create_cols("float")
)
df_int = DataFrame(
np.random.randn(100, 5).astype("int64"),
dtype="int64",
columns=create_cols("int"),
)
df_bool = DataFrame(True, index=df_float.index, columns=create_cols("bool"))
df_object = DataFrame(
"foo", index=df_float.index, columns=create_cols("object")
)
df_dt = DataFrame(
Timestamp("20010101"), index=df_float.index, columns=create_cols("date")
)
# add in some nans
df_float.iloc[30:50, 1:3] = np.nan
# ## this is a bug in read_csv right now ####
# df_dt.loc[30:50,1:3] = np.nan
df = pd.concat([df_float, df_int, df_bool, df_object, df_dt], axis=1)
# dtype
dtypes = {}
for n, dtype in [
("float", np.float64),
("int", np.int64),
("bool", np.bool_),
("object", object),
]:
for c in create_cols(n):
dtypes[c] = dtype
with tm.ensure_clean() as filename:
df.to_csv(filename)
rs = read_csv(
filename, index_col=0, dtype=dtypes, parse_dates=create_cols("date")
)
tm.assert_frame_equal(rs, df)
def test_to_csv_dups_cols(self):
df = DataFrame(
np.random.randn(1000, 30),
columns=list(range(15)) + list(range(15)),
dtype="float64",
)
with tm.ensure_clean() as filename:
df.to_csv(filename) # single dtype, fine
result = read_csv(filename, index_col=0)
result.columns = df.columns
tm.assert_frame_equal(result, df)
df_float = DataFrame(np.random.randn(1000, 3), dtype="float64")
df_int = DataFrame(np.random.randn(1000, 3)).astype("int64")
df_bool = DataFrame(True, index=df_float.index, columns=range(3))
df_object = DataFrame("foo", index=df_float.index, columns=range(3))
df_dt = DataFrame(Timestamp("20010101"), index=df_float.index, columns=range(3))
df = pd.concat(
[df_float, df_int, df_bool, df_object, df_dt], axis=1, ignore_index=True
)
df.columns = [0, 1, 2] * 5
with tm.ensure_clean() as filename:
df.to_csv(filename)
result = read_csv(filename, index_col=0)
# date cols
for i in ["0.4", "1.4", "2.4"]:
result[i] = to_datetime(result[i])
result.columns = df.columns
tm.assert_frame_equal(result, df)
# GH3457
N = 10
df = tm.makeCustomDataframe(N, 3)
df.columns = ["a", "a", "b"]
with tm.ensure_clean() as filename:
df.to_csv(filename)
# read_csv will rename the dups columns
result = read_csv(filename, index_col=0)
result = result.rename(columns={"a.1": "a"})
tm.assert_frame_equal(result, df)
def test_to_csv_chunking(self):
aa = DataFrame({"A": range(100000)})
aa["B"] = aa.A + 1.0
aa["C"] = aa.A + 2.0
aa["D"] = aa.A + 3.0
for chunksize in [10000, 50000, 100000]:
with tm.ensure_clean() as filename:
aa.to_csv(filename, chunksize=chunksize)
rs = read_csv(filename, index_col=0)
tm.assert_frame_equal(rs, aa)
@pytest.mark.slow
def test_to_csv_wide_frame_formatting(self):
# Issue #8621
df = DataFrame(np.random.randn(1, 100010), columns=None, index=None)
with tm.ensure_clean() as filename:
df.to_csv(filename, header=False, index=False)
rs = read_csv(filename, header=None)
tm.assert_frame_equal(rs, df)
def test_to_csv_bug(self):
f1 = StringIO("a,1.0\nb,2.0")
df = self.read_csv(f1, header=None)
newdf = DataFrame({"t": df[df.columns[0]]})
with tm.ensure_clean() as path:
newdf.to_csv(path)
recons = read_csv(path, index_col=0)
# don't check_names as t != 1
tm.assert_frame_equal(recons, newdf, check_names=False)
def test_to_csv_unicode(self):
df = DataFrame({"c/\u03c3": [1, 2, 3]})
with tm.ensure_clean() as path:
df.to_csv(path, encoding="UTF-8")
df2 = read_csv(path, index_col=0, encoding="UTF-8")
tm.assert_frame_equal(df, df2)
df.to_csv(path, encoding="UTF-8", index=False)
df2 = read_csv(path, index_col=None, encoding="UTF-8")
tm.assert_frame_equal(df, df2)
def test_to_csv_unicode_index_col(self):
buf = StringIO("")
df = DataFrame(
[["\u05d0", "d2", "d3", "d4"], ["a1", "a2", "a3", "a4"]],
columns=["\u05d0", "\u05d1", "\u05d2", "\u05d3"],
index=["\u05d0", "\u05d1"],
)
df.to_csv(buf, encoding="UTF-8")
buf.seek(0)
df2 = read_csv(buf, index_col=0, encoding="UTF-8")
tm.assert_frame_equal(df, df2)
def test_to_csv_stringio(self, float_frame):
buf = StringIO()
float_frame.to_csv(buf)
buf.seek(0)
recons = read_csv(buf, index_col=0)
tm.assert_frame_equal(recons, float_frame)
def test_to_csv_float_format(self):
df = DataFrame(
[[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]],
index=["A", "B"],
columns=["X", "Y", "Z"],
)
with tm.ensure_clean() as filename:
df.to_csv(filename, float_format="%.2f")
rs = read_csv(filename, index_col=0)
xp = DataFrame(
[[0.12, 0.23, 0.57], [12.32, 123123.20, 321321.20]],
index=["A", "B"],
columns=["X", "Y", "Z"],
)
tm.assert_frame_equal(rs, xp)
def test_to_csv_unicodewriter_quoting(self):
df = DataFrame({"A": [1, 2, 3], "B": ["foo", "bar", "baz"]})
buf = StringIO()
df.to_csv(buf, index=False, quoting=csv.QUOTE_NONNUMERIC, encoding="utf-8")
result = buf.getvalue()
expected_rows = ['"A","B"', '1,"foo"', '2,"bar"', '3,"baz"']
expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert result == expected
def test_to_csv_quote_none(self):
# GH4328
df = DataFrame({"A": ["hello", '{"hello"}']})
for encoding in (None, "utf-8"):
buf = StringIO()
df.to_csv(buf, quoting=csv.QUOTE_NONE, encoding=encoding, index=False)
result = buf.getvalue()
expected_rows = ["A", "hello", '{"hello"}']
expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert result == expected
def test_to_csv_index_no_leading_comma(self):
df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}, index=["one", "two", "three"])
buf = StringIO()
df.to_csv(buf, index_label=False)
expected_rows = ["A,B", "one,1,4", "two,2,5", "three,3,6"]
expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert buf.getvalue() == expected
def test_to_csv_line_terminators(self):
# see gh-20353
df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}, index=["one", "two", "three"])
with tm.ensure_clean() as path:
# case 1: CRLF as line terminator
df.to_csv(path, line_terminator="\r\n")
expected = b",A,B\r\none,1,4\r\ntwo,2,5\r\nthree,3,6\r\n"
with open(path, mode="rb") as f:
assert f.read() == expected
with tm.ensure_clean() as path:
# case 2: LF as line terminator
df.to_csv(path, line_terminator="\n")
expected = b",A,B\none,1,4\ntwo,2,5\nthree,3,6\n"
with open(path, mode="rb") as f:
assert f.read() == expected
with tm.ensure_clean() as path:
# case 3: The default line terminator(=os.linesep)(gh-21406)
df.to_csv(path)
os_linesep = os.linesep.encode("utf-8")
expected = (
b",A,B"
+ os_linesep
+ b"one,1,4"
+ os_linesep
+ b"two,2,5"
+ os_linesep
+ b"three,3,6"
+ os_linesep
)
with open(path, mode="rb") as f:
assert f.read() == expected
def test_to_csv_from_csv_categorical(self):
# CSV with categoricals should result in the same output
# as when one would add a "normal" Series/DataFrame.
s = Series(pd.Categorical(["a", "b", "b", "a", "a", "c", "c", "c"]))
s2 = Series(["a", "b", "b", "a", "a", "c", "c", "c"])
res = StringIO()
s.to_csv(res, header=False)
exp = StringIO()
s2.to_csv(exp, header=False)
assert res.getvalue() == exp.getvalue()
df = DataFrame({"s": s})
df2 = DataFrame({"s": s2})
res = StringIO()
df.to_csv(res)
exp = StringIO()
df2.to_csv(exp)
assert res.getvalue() == exp.getvalue()
def test_to_csv_path_is_none(self, float_frame):
# GH 8215
# Make sure we return string for consistency with
# Series.to_csv()
csv_str = float_frame.to_csv(path_or_buf=None)
assert isinstance(csv_str, str)
recons = read_csv(StringIO(csv_str), index_col=0)
tm.assert_frame_equal(float_frame, recons)
@pytest.mark.parametrize(
"df,encoding",
[
(
DataFrame(
[[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]],
index=["A", "B"],
columns=["X", "Y", "Z"],
),
None,
),
# GH 21241, 21118
(DataFrame([["abc", "def", "ghi"]], columns=["X", "Y", "Z"]), "ascii"),
(DataFrame(5 * [[123, "你好", "世界"]], columns=["X", "Y", "Z"]), "gb2312"),
(
DataFrame(5 * [[123, "Γειά σου", "Κόσμε"]], columns=["X", "Y", "Z"]),
"cp737",
),
],
)
def test_to_csv_compression(self, df, encoding, compression):
with tm.ensure_clean() as filename:
df.to_csv(filename, compression=compression, encoding=encoding)
# test the round trip - to_csv -> read_csv
result = read_csv(
filename, compression=compression, index_col=0, encoding=encoding
)
tm.assert_frame_equal(df, result)
# test the round trip using file handle - to_csv -> read_csv
with get_handle(
filename, "w", compression=compression, encoding=encoding
) as handles:
df.to_csv(handles.handle, encoding=encoding)
assert not handles.handle.closed
result = read_csv(
filename,
compression=compression,
encoding=encoding,
index_col=0,
).squeeze("columns")
tm.assert_frame_equal(df, result)
# explicitly make sure file is compressed
with tm.decompress_file(filename, compression) as fh:
text = fh.read().decode(encoding or "utf8")
for col in df.columns:
assert col in text
with tm.decompress_file(filename, compression) as fh:
tm.assert_frame_equal(df, read_csv(fh, index_col=0, encoding=encoding))
def test_to_csv_date_format(self, datetime_frame):
with tm.ensure_clean("__tmp_to_csv_date_format__") as path:
dt_index = datetime_frame.index
datetime_frame = DataFrame(
{"A": dt_index, "B": dt_index.shift(1)}, index=dt_index
)
datetime_frame.to_csv(path, date_format="%Y%m%d")
# Check that the data was put in the specified format
test = read_csv(path, index_col=0)
datetime_frame_int = datetime_frame.applymap(
lambda x: int(x.strftime("%Y%m%d"))
)
datetime_frame_int.index = datetime_frame_int.index.map(
lambda x: int(x.strftime("%Y%m%d"))
)
tm.assert_frame_equal(test, datetime_frame_int)
datetime_frame.to_csv(path, date_format="%Y-%m-%d")
# Check that the data was put in the specified format
test = read_csv(path, index_col=0)
datetime_frame_str = datetime_frame.applymap(
lambda x: x.strftime("%Y-%m-%d")
)
datetime_frame_str.index = datetime_frame_str.index.map(
lambda x: x.strftime("%Y-%m-%d")
)
tm.assert_frame_equal(test, datetime_frame_str)
# Check that columns get converted
datetime_frame_columns = datetime_frame.T
datetime_frame_columns.to_csv(path, date_format="%Y%m%d")
test = read_csv(path, index_col=0)
datetime_frame_columns = datetime_frame_columns.applymap(
lambda x: int(x.strftime("%Y%m%d"))
)
# Columns don't get converted to ints by read_csv
datetime_frame_columns.columns = datetime_frame_columns.columns.map(
lambda x: x.strftime("%Y%m%d")
)
tm.assert_frame_equal(test, datetime_frame_columns)
# test NaTs
nat_index = to_datetime(
["NaT"] * 10 + ["2000-01-01", "1/1/2000", "1-1-2000"]
)
nat_frame = DataFrame({"A": nat_index}, index=nat_index)
nat_frame.to_csv(path, date_format="%Y-%m-%d")
test = read_csv(path, parse_dates=[0, 1], index_col=0)
tm.assert_frame_equal(test, nat_frame)
def test_to_csv_with_dst_transitions(self):
with tm.ensure_clean("csv_date_format_with_dst") as path:
# make sure we are not failing on transitions
times = date_range(
"2013-10-26 23:00",
"2013-10-27 01:00",
tz="Europe/London",
freq="H",
ambiguous="infer",
)
for i in [times, times + pd.Timedelta("10s")]:
i = i._with_freq(None) # freq is not preserved by read_csv
time_range = np.array(range(len(i)), dtype="int64")
df = DataFrame({"A": time_range}, index=i)
df.to_csv(path, index=True)
# we have to reconvert the index as we
# don't parse the tz's
result = read_csv(path, index_col=0)
result.index = to_datetime(result.index, utc=True).tz_convert(
"Europe/London"
)
tm.assert_frame_equal(result, df)
# GH11619
idx = date_range("2015-01-01", "2015-12-31", freq="H", tz="Europe/Paris")
idx = idx._with_freq(None) # freq does not round-trip
idx._data._freq = None # otherwise there is trouble on unpickle
df = DataFrame({"values": 1, "idx": idx}, index=idx)
with tm.ensure_clean("csv_date_format_with_dst") as path:
df.to_csv(path, index=True)
result = read_csv(path, index_col=0)
result.index = to_datetime(result.index, utc=True).tz_convert(
"Europe/Paris"
)
result["idx"] = to_datetime(result["idx"], utc=True).astype(
"datetime64[ns, Europe/Paris]"
)
tm.assert_frame_equal(result, df)
# assert working
df.astype(str)
with tm.ensure_clean("csv_date_format_with_dst") as path:
df.to_pickle(path)
result = pd.read_pickle(path)
tm.assert_frame_equal(result, df)
def test_to_csv_quoting(self):
df = DataFrame(
{
"c_bool": [True, False],
"c_float": [1.0, 3.2],
"c_int": [42, np.nan],
"c_string": ["a", "b,c"],
}
)
expected_rows = [
",c_bool,c_float,c_int,c_string",
"0,True,1.0,42.0,a",
'1,False,3.2,,"b,c"',
]
expected = tm.convert_rows_list_to_csv_str(expected_rows)
result = df.to_csv()
assert result == expected
result = df.to_csv(quoting=None)
assert result == expected
expected_rows = [
",c_bool,c_float,c_int,c_string",
"0,True,1.0,42.0,a",
'1,False,3.2,,"b,c"',
]
expected = tm.convert_rows_list_to_csv_str(expected_rows)
result = df.to_csv(quoting=csv.QUOTE_MINIMAL)
assert result == expected
expected_rows = [
'"","c_bool","c_float","c_int","c_string"',
'"0","True","1.0","42.0","a"',
'"1","False","3.2","","b,c"',
]
expected = tm.convert_rows_list_to_csv_str(expected_rows)
result = df.to_csv(quoting=csv.QUOTE_ALL)
assert result == expected
# see gh-12922, gh-13259: make sure changes to
# the formatters do not break this behaviour
expected_rows = [
'"","c_bool","c_float","c_int","c_string"',
'0,True,1.0,42.0,"a"',
'1,False,3.2,"","b,c"',
]
expected = tm.convert_rows_list_to_csv_str(expected_rows)
result = df.to_csv(quoting=csv.QUOTE_NONNUMERIC)
assert result == expected
msg = "need to escape, but no escapechar set"
with pytest.raises(csv.Error, match=msg):
df.to_csv(quoting=csv.QUOTE_NONE)
with pytest.raises(csv.Error, match=msg):
df.to_csv(quoting=csv.QUOTE_NONE, escapechar=None)
expected_rows = [
",c_bool,c_float,c_int,c_string",
"0,True,1.0,42.0,a",
"1,False,3.2,,b!,c",
]
expected = tm.convert_rows_list_to_csv_str(expected_rows)
result = df.to_csv(quoting=csv.QUOTE_NONE, escapechar="!")
assert result == expected
expected_rows = [
",c_bool,c_ffloat,c_int,c_string",
"0,True,1.0,42.0,a",
"1,False,3.2,,bf,c",
]
expected = tm.convert_rows_list_to_csv_str(expected_rows)
result = df.to_csv(quoting=csv.QUOTE_NONE, escapechar="f")
assert result == expected
# see gh-3503: quoting Windows line terminators
# presents with encoding?
text_rows = ["a,b,c", '1,"test \r\n",3']
text = tm.convert_rows_list_to_csv_str(text_rows)
df = read_csv(StringIO(text))
buf = StringIO()
df.to_csv(buf, encoding="utf-8", index=False)
assert buf.getvalue() == text
# xref gh-7791: make sure the quoting parameter is passed through
# with multi-indexes
df = DataFrame({"a": [1, 2], "b": [3, 4], "c": [5, 6]})
df = df.set_index(["a", "b"])
expected_rows = ['"a","b","c"', '"1","3","5"', '"2","4","6"']
expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert df.to_csv(quoting=csv.QUOTE_ALL) == expected
def test_period_index_date_overflow(self):
# see gh-15982
dates = ["1990-01-01", "2000-01-01", "3005-01-01"]
index = pd.PeriodIndex(dates, freq="D")
df = DataFrame([4, 5, 6], index=index)
result = df.to_csv()
expected_rows = [",0", "1990-01-01,4", "2000-01-01,5", "3005-01-01,6"]
expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert result == expected
date_format = "%m-%d-%Y"
result = df.to_csv(date_format=date_format)
expected_rows = [",0", "01-01-1990,4", "01-01-2000,5", "01-01-3005,6"]
expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert result == expected
# Overflow with pd.NaT
dates = ["1990-01-01", NaT, "3005-01-01"]
index = pd.PeriodIndex(dates, freq="D")
df = DataFrame([4, 5, 6], index=index)
result = df.to_csv()
expected_rows = [",0", "1990-01-01,4", ",5", "3005-01-01,6"]
expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert result == expected
def test_multi_index_header(self):
# see gh-5539
columns = MultiIndex.from_tuples([("a", 1), ("a", 2), ("b", 1), ("b", 2)])
df = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]])
df.columns = columns
header = ["a", "b", "c", "d"]
result = df.to_csv(header=header)
expected_rows = [",a,b,c,d", "0,1,2,3,4", "1,5,6,7,8"]
expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert result == expected
def test_to_csv_single_level_multi_index(self):
# see gh-26303
index = Index([(1,), (2,), (3,)])
df = DataFrame([[1, 2, 3]], columns=index)
df = df.reindex(columns=[(1,), (3,)])
expected = ",1,3\n0,1,3\n"
result = df.to_csv(line_terminator="\n")
tm.assert_almost_equal(result, expected)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import json
import os
import torch
import pandas as pd
import numpy as np
def save_csv_log(opt, head, value, is_create=False, file_name='test'):
if len(value.shape) < 2:
value = np.expand_dims(value, axis=0)
df = pd.DataFrame(value)
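# Hedged sketch of a typical continuation (assumption; the body is truncated
# here). 'opt.ckpt' is a hypothetical attribute for the output directory, and
# `head` is assumed to match the DataFrame's column count.
#
#     file_path = os.path.join(opt.ckpt, file_name + '.csv')
#     if is_create or not os.path.exists(file_path):
#         df.to_csv(file_path, header=head, index=False)
#     else:
#         df.to_csv(file_path, mode='a', header=False, index=False)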
import streamlit as st
import numpy as np
import pandas as pd
import plotly.graph_objects as go
from datetime import datetime
import requests
class DataFetcher:
def __init__(self):
self.url_brazil_general = 'https://covid19-brazil-api.now.sh/api/report/v1/brazil/'
self.url_brazil_states = 'https://covid19-brazil-api.now.sh/api/report/v1'
self.url_world_cases = 'https://pomber.github.io/covid19/timeseries.json'
self.brazil_general_json = requests.get(self.url_brazil_general).json()
self.brazil_states_json = requests.get(self.url_brazil_states).json()
self.world_cases_json = requests.get(self.url_world_cases).json()
def get_apis_status_code(self):
brazil_general = requests.get(self.url_brazil_general).status_code
brazil_states = requests.get(self.url_brazil_states).status_code
world_cases = requests.get(self.url_world_cases).status_code
return brazil_general, brazil_states, world_cases
def get_main_counters(self):
brazil_counters = self.brazil_general_json
confirmed = brazil_counters['data']['confirmed']
deaths = brazil_counters['data']['deaths']
recovered = brazil_counters['data']['recovered']
return confirmed, deaths, recovered
def get_update_time(self):
update_time = self.brazil_general_json['data']['updated_at']
update_time_brazil = pd.to_datetime(update_time) - pd.Timedelta(hours=3)
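# Hedged sketch (the method is truncated here): the UTC-3 offset above shifts
# the API's UTC timestamp to Brazil local time; a natural return value would
# be a formatted string, e.g.:
#
#     return update_time_brazil.strftime('%d/%m/%Y %H:%M')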
from pandas.util import hash_pandas_object
import hashlib
import pandas as pd
import random
random.seed(42)
import numpy as np
import psutil
import time
ROWS = 20000000
DATA = [random.random() for _ in range(ROWS)]
def mem_use():
mem_profile = psutil.virtual_memory()
print("Memory Usage = {} | percent = {}".format(mem_profile.used,
mem_profile.percent))
def apply_assertion(df):
assert hashlib.sha256(pd.util.hash_pandas_object(df, index=True).values).hexdigest() == '867567dc7d46f77af2bca9804ac366a5165d27612de100461b699bd23094ab90'
## CREATE AND LOAD ARRAYS IN MEMORY
def using_numpy_arrays():
larray = []
for i in range(0,10):
mem_use()
larray.append(np.array(DATA)) #// 2.19 GB
del larray
time.sleep(5)
### USING HDF Storage
def using_hdf_storage():
store = pd.HDFStore('store.h5')
for i in range(0,10):
mem_use() ## constant memory usage (start with
store[f'v{i}'] = pd.DataFrame({f'v{i}':DATA})
df = pd.DataFrame()
generat = ( store[f'v{i}'] for i in range(0,10))
df = pd.concat(generat,axis=1)
mem_use()
print(df.head())
time.sleep(5)
using_hdf_storage()
exit()
import gc
# USING GENERATOR
def create_df_using_generator():
genera = ( pd.DataFrame([x]) for x in range(1,20000000))
mem_use()
myDf = pd.DataFrame()
myDf = pd.concat(genera, axis=1)
from scipy.stats import mannwhitneyu,wilcoxon
import numpy as np
from scipy.io import mmread
import pandas as pd
X = mmread('RFiles/all_data.mtx')
X = X.tocsr()
celllabels = np.load('Notebooks/meta/celllabels.npy')
isCSF = np.load('Notebooks/meta/isCSF.npy')
isMS = np.load('Notebooks/meta/isMS.npy')
logX = np.log10(1+X.todense())
scaling_factor = logX.mean(axis=1)
norm_X = logX - scaling_factor.reshape(len(scaling_factor), 1)
# def MannWhitneyUTest(norm_X, idx1, idx2):
# res = []
# for i in range(X.shape[1]):
# x= np.asarray(X[idx1,i].todense()).ravel()
# y= np.asarray(X[idx2,i].todense()).ravel()
# if(len(np.unique(np.concatenate([x,y])))==1):
# res.append([-1,-1])
# else:
# res.append(mannwhitneyu(x,y,alternative = 'two-sided'))
# stat = np.asarray([x[0] for x in res])
# pvalue = np.asarray([x[1] for x in res])
# return(stat,pvalue)
def MannWhitneyUTest(X, idx1, idx2):
res = []
for i in range(X.shape[1]):
x= np.asarray(X[idx1,i]).ravel()
y= np.asarray(X[idx2,i]).ravel()
if(len(np.unique(np.concatenate([x,y])))==1):
res.append([-1,-1])
else:
res.append(mannwhitneyu(x,y,alternative = 'two-sided'))
stat = np.asarray([x[0] for x in res])
pvalue = np.asarray([x[1] for x in res])
return(stat,pvalue)
celltypes = ['B1', 'B2', 'CD4', 'CD8a', 'CD8n', 'Gran', 'MegaK', 'Mono', 'NK1',
'NK2', 'Tdg', 'Tregs', 'mDC1', 'mDC2', 'ncMono', 'pDC', 'plasma']
for i in celltypes:
idx1 = (celllabels==i) & isCSF & isMS
idx2 = (celllabels==i) & isCSF & (isMS == False)
if (np.sum(idx1)>10) and (np.sum(idx2)>10):
stat,pvalue = MannWhitneyUTest(norm_X, idx1, idx2)
clusterid = np.repeat(i, len(stat))
res = pd.DataFrame([clusterid, stat, pvalue], index=['clusterid', 'stat', 'pvalue'])
from __future__ import annotations
from collections import abc
from datetime import datetime
from functools import partial
from itertools import islice
from typing import (
TYPE_CHECKING,
Callable,
Hashable,
List,
Tuple,
TypedDict,
Union,
cast,
overload,
)
import warnings
import numpy as np
from pandas._libs import tslib
from pandas._libs.tslibs import (
OutOfBoundsDatetime,
Timedelta,
Timestamp,
iNaT,
nat_strings,
parsing,
timezones,
)
from pandas._libs.tslibs.parsing import ( # noqa:F401
DateParseError,
format_is_iso,
guess_datetime_format,
)
from pandas._libs.tslibs.strptime import array_strptime
from pandas._typing import (
AnyArrayLike,
ArrayLike,
DateTimeErrorChoices,
Timezone,
npt,
)
from pandas.util._exceptions import find_stack_level
from pandas.core.dtypes.common import (
ensure_object,
is_datetime64_dtype,
is_datetime64_ns_dtype,
is_datetime64tz_dtype,
is_float,
is_integer,
is_integer_dtype,
is_list_like,
is_numeric_dtype,
is_scalar,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCSeries,
)
from pandas.core.dtypes.missing import notna
from pandas.arrays import (
DatetimeArray,
IntegerArray,
)
from pandas.core import algorithms
from pandas.core.algorithms import unique
from pandas.core.arrays.base import ExtensionArray
from pandas.core.arrays.datetimes import (
maybe_convert_dtype,
objects_to_datetime64ns,
tz_to_dtype,
)
from pandas.core.construction import extract_array
from pandas.core.indexes.base import Index
from pandas.core.indexes.datetimes import DatetimeIndex
if TYPE_CHECKING:
from pandas._libs.tslibs.nattype import NaTType
from pandas._libs.tslibs.timedeltas import UnitChoices
from pandas import (
DataFrame,
Series,
)
# ---------------------------------------------------------------------
# types used in annotations
ArrayConvertible = Union[List, Tuple, AnyArrayLike]
Scalar = Union[int, float, str]
DatetimeScalar = Union[Scalar, datetime]
DatetimeScalarOrArrayConvertible = Union[DatetimeScalar, ArrayConvertible]
DatetimeDictArg = Union[List[Scalar], Tuple[Scalar, ...], AnyArrayLike]
class YearMonthDayDict(TypedDict, total=True):
year: DatetimeDictArg
month: DatetimeDictArg
day: DatetimeDictArg
class FulldatetimeDict(YearMonthDayDict, total=False):
hour: DatetimeDictArg
hours: DatetimeDictArg
minute: DatetimeDictArg
minutes: DatetimeDictArg
second: DatetimeDictArg
seconds: DatetimeDictArg
ms: DatetimeDictArg
us: DatetimeDictArg
ns: DatetimeDictArg
DictConvertible = Union[FulldatetimeDict, "DataFrame"]
start_caching_at = 50
# ---------------------------------------------------------------------
def _guess_datetime_format_for_array(arr, dayfirst: bool | None = False):
# Try to guess the format based on the first non-NaN element
non_nan_elements = notna(arr).nonzero()[0]
if len(non_nan_elements):
return guess_datetime_format(arr[non_nan_elements[0]], dayfirst=dayfirst)
def should_cache(
arg: ArrayConvertible, unique_share: float = 0.7, check_count: int | None = None
) -> bool:
"""
Decides whether to do caching.
If the percent of unique elements among `check_count` elements less
than `unique_share * 100` then we can do caching.
Parameters
----------
arg: listlike, tuple, 1-d array, Series
unique_share: float, default=0.7, optional
0 < unique_share < 1
check_count: int, optional
0 <= check_count <= len(arg)
Returns
-------
do_caching: bool
Notes
-----
By default for a sequence of less than 50 items in size, we don't do
caching; for the number of elements less than 5000, we take ten percent of
all elements to check for a uniqueness share; if the sequence size is more
than 5000, then we check only the first 500 elements.
All constants were chosen empirically.
"""
do_caching = True
# default realization
if check_count is None:
# in this case, the gain from caching is negligible
if len(arg) <= start_caching_at:
return False
if len(arg) <= 5000:
check_count = len(arg) // 10
else:
check_count = 500
else:
assert (
0 <= check_count <= len(arg)
), "check_count must be in next bounds: [0; len(arg)]"
if check_count == 0:
return False
assert 0 < unique_share < 1, "unique_share must be in next bounds: (0; 1)"
try:
# We can't cache if the items are not hashable.
unique_elements = set(islice(arg, check_count))
except TypeError:
return False
if len(unique_elements) > check_count * unique_share:
do_caching = False
return do_caching
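# Worked example of the heuristic above (added comment, not in the upstream
# source): for an 800-element argument, check_count defaults to 800 // 10 = 80,
# so caching is kept only if those first 80 items contain at most
# 80 * 0.7 = 56 unique values, i.e. when repeats are common enough to pay off.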
def _maybe_cache(
arg: ArrayConvertible,
format: str | None,
cache: bool,
convert_listlike: Callable,
) -> Series:
"""
Create a cache of unique dates from an array of dates
Parameters
----------
arg : listlike, tuple, 1-d array, Series
format : string
Strftime format to parse time
cache : bool
True attempts to create a cache of converted values
convert_listlike : function
Conversion function to apply on dates
Returns
-------
cache_array : Series
Cache of converted, unique dates. Can be empty
"""
from pandas import Series
cache_array = Series(dtype=object)
if cache:
# Perform a quicker unique check
if not should_cache(arg):
return cache_array
unique_dates = unique(arg)
if len(unique_dates) < len(arg):
cache_dates = convert_listlike(unique_dates, format)
cache_array = Series(cache_dates, index=unique_dates)
# GH#39882 and GH#35888 in case of None and NaT we get duplicates
if not cache_array.index.is_unique:
cache_array = cache_array[~cache_array.index.duplicated()]
return cache_array
def _box_as_indexlike(
dt_array: ArrayLike, utc: bool | None = None, name: Hashable = None
) -> Index:
"""
Properly boxes the ndarray of datetimes to DatetimeIndex
if it is possible or to generic Index instead
Parameters
----------
dt_array: 1-d array
Array of datetimes to be wrapped in an Index.
utc : bool or None
Whether to localize the boxed DatetimeIndex as UTC.
name : string, default None
Name for a resulting index
Returns
-------
result : datetime of converted dates
- DatetimeIndex if convertible to sole datetime64 type
- general Index otherwise
"""
if is_datetime64_dtype(dt_array):
tz = "utc" if utc else None
return DatetimeIndex(dt_array, tz=tz, name=name)
return Index(dt_array, name=name, dtype=dt_array.dtype)
def _convert_and_box_cache(
arg: DatetimeScalarOrArrayConvertible,
cache_array: Series,
name: str | None = None,
) -> Index:
"""
Convert array of dates with a cache and wrap the result in an Index.
Parameters
----------
arg : integer, float, string, datetime, list, tuple, 1-d array, Series
cache_array : Series
Cache of converted, unique dates
name : string, default None
Name for a DatetimeIndex
Returns
-------
result : Index-like of converted dates
"""
from pandas import Series
result = Series(arg).map(cache_array)
return _box_as_indexlike(result._values, utc=None, name=name)
def _return_parsed_timezone_results(result: np.ndarray, timezones, tz, name) -> Index:
"""
Return results from array_strptime if a %z or %Z directive was passed.
Parameters
----------
result : ndarray[int64]
int64 date representations of the dates
timezones : ndarray
pytz timezone objects
tz : object
None or pytz timezone object
name : string, default None
Name for a DatetimeIndex
Returns
-------
tz_result : Index-like of parsed dates with timezone
"""
tz_results = np.array(
[Timestamp(res).tz_localize(zone) for res, zone in zip(result, timezones)]
)
if tz is not None:
# Convert to the same tz
tz_results = np.array([tz_result.tz_convert(tz) for tz_result in tz_results])
return Index(tz_results, name=name)
def _convert_listlike_datetimes(
arg,
format: str | None,
name: Hashable = None,
tz: Timezone | None = None,
unit: str | None = None,
errors: str = "raise",
infer_datetime_format: bool = False,
dayfirst: bool | None = None,
yearfirst: bool | None = None,
exact: bool = True,
):
"""
Helper function for to_datetime. Performs the conversions of 1D listlike
of dates
Parameters
----------
arg : list, tuple, ndarray, Series, Index
date to be parsed
name : object
None or string for the Index name
tz : object
None or 'utc'
unit : str
None or string of the frequency of the passed data
errors : str
error handing behaviors from to_datetime, 'raise', 'coerce', 'ignore'
infer_datetime_format : bool, default False
inferring format behavior from to_datetime
dayfirst : bool
dayfirst parsing behavior from to_datetime
yearfirst : bool
yearfirst parsing behavior from to_datetime
exact : bool, default True
exact format matching behavior from to_datetime
Returns
-------
Index-like of parsed dates
"""
if isinstance(arg, (list, tuple)):
arg = np.array(arg, dtype="O")
arg_dtype = getattr(arg, "dtype", None)
# these are shortcutable
if is_datetime64tz_dtype(arg_dtype):
if not isinstance(arg, (DatetimeArray, DatetimeIndex)):
return DatetimeIndex(arg, tz=tz, name=name)
if tz == "utc":
arg = arg.tz_convert(None).tz_localize(tz)
return arg
elif is_datetime64_ns_dtype(arg_dtype):
if not isinstance(arg, (DatetimeArray, DatetimeIndex)):
try:
return DatetimeIndex(arg, tz=tz, name=name)
except ValueError:
pass
elif tz:
# DatetimeArray, DatetimeIndex
return arg.tz_localize(tz)
return arg
elif unit is not None:
if format is not None:
raise ValueError("cannot specify both format and unit")
return _to_datetime_with_unit(arg, unit, name, tz, errors)
elif getattr(arg, "ndim", 1) > 1:
raise TypeError(
"arg must be a string, datetime, list, tuple, 1-d array, or Series"
)
# warn if passing timedelta64, raise for PeriodDtype
# NB: this must come after unit transformation
orig_arg = arg
try:
arg, _ = maybe_convert_dtype(arg, copy=False, tz=timezones.maybe_get_tz(tz))
except TypeError:
if errors == "coerce":
npvalues = np.array(["NaT"], dtype="datetime64[ns]").repeat(len(arg))
return DatetimeIndex(npvalues, name=name)
elif errors == "ignore":
idx = Index(arg, name=name)
return idx
raise
arg = ensure_object(arg)
require_iso8601 = False
if infer_datetime_format and format is None:
format = _guess_datetime_format_for_array(arg, dayfirst=dayfirst)
if format is not None:
# There is a special fast-path for iso8601 formatted
# datetime strings, so in those cases don't use the inferred
# format because this path makes the process slower in this
# special case
format_is_iso8601 = format_is_iso(format)
if format_is_iso8601:
require_iso8601 = not infer_datetime_format
format = None
if format is not None:
res = _to_datetime_with_format(
arg, orig_arg, name, tz, format, exact, errors, infer_datetime_format
)
if res is not None:
return res
assert format is None or infer_datetime_format
utc = tz == "utc"
result, tz_parsed = objects_to_datetime64ns(
arg,
dayfirst=dayfirst,
yearfirst=yearfirst,
utc=utc,
errors=errors,
require_iso8601=require_iso8601,
allow_object=True,
)
if tz_parsed is not None:
# We can take a shortcut since the datetime64 numpy array
# is in UTC
dta = DatetimeArray(result, dtype=tz_to_dtype(tz_parsed))
return DatetimeIndex._simple_new(dta, name=name)
utc = tz == "utc"
return _box_as_indexlike(result, utc=utc, name=name)
def _array_strptime_with_fallback(
arg,
name,
tz,
fmt: str,
exact: bool,
errors: str,
infer_datetime_format: bool,
) -> Index | None:
"""
Call array_strptime, with fallback behavior depending on 'errors'.
"""
utc = tz == "utc"
try:
result, timezones = array_strptime(arg, fmt, exact=exact, errors=errors)
except OutOfBoundsDatetime:
if errors == "raise":
raise
elif errors == "coerce":
result = np.empty(arg.shape, dtype="M8[ns]")
iresult = result.view("i8")
iresult.fill(iNaT)
else:
result = arg
except ValueError:
# if fmt was inferred, try falling back
# to array_to_datetime - terminate here
# for specified formats
if not infer_datetime_format:
if errors == "raise":
raise
elif errors == "coerce":
result = np.empty(arg.shape, dtype="M8[ns]")
iresult = result.view("i8")
iresult.fill(iNaT)
else:
result = arg
else:
# Indicates to the caller to fallback to objects_to_datetime64ns
return None
else:
if "%Z" in fmt or "%z" in fmt:
return _return_parsed_timezone_results(result, timezones, tz, name)
return _box_as_indexlike(result, utc=utc, name=name)
def _to_datetime_with_format(
arg,
orig_arg,
name,
tz,
fmt: str,
exact: bool,
errors: str,
infer_datetime_format: bool,
) -> Index | None:
"""
Try parsing with the given format, returning None on failure.
"""
result = None
# shortcut formatting here
if fmt == "%Y%m%d":
# pass orig_arg as float-dtype may have been converted to
# datetime64[ns]
orig_arg = ensure_object(orig_arg)
try:
# may return None without raising
result = _attempt_YYYYMMDD(orig_arg, errors=errors)
except (ValueError, TypeError, OutOfBoundsDatetime) as err:
raise ValueError(
"cannot convert the input to '%Y%m%d' date format"
) from err
if result is not None:
utc = tz == "utc"
return _box_as_indexlike(result, utc=utc, name=name)
# fallback
res = _array_strptime_with_fallback(
arg, name, tz, fmt, exact, errors, infer_datetime_format
)
return res
def _to_datetime_with_unit(arg, unit, name, tz, errors: str) -> Index:
"""
to_datetime specialized to the case where a 'unit' is passed.
"""
arg = extract_array(arg, extract_numpy=True)
# GH#30050 pass an ndarray to tslib.array_with_unit_to_datetime
# because it expects an ndarray argument
if isinstance(arg, IntegerArray):
arr = arg.astype(f"datetime64[{unit}]")
tz_parsed = None
else:
arg = np.asarray(arg)
arr, tz_parsed = tslib.array_with_unit_to_datetime(arg, unit, errors=errors)
if errors == "ignore":
# Index constructor _may_ infer to DatetimeIndex
result = Index._with_infer(arr, name=name)
else:
result = DatetimeIndex(arr, name=name)
if not isinstance(result, DatetimeIndex):
return result
# GH#23758: We may still need to localize the result with tz
# GH#25546: Apply tz_parsed first (from arg), then tz (from caller)
# result will be naive but in UTC
result = result.tz_localize("UTC").tz_convert(tz_parsed)
if tz is not None:
if result.tz is None:
result = result.tz_localize(tz)
else:
result = result.tz_convert(tz)
return result
def _adjust_to_origin(arg, origin, unit):
"""
Helper function for to_datetime.
Adjust input argument to the specified origin
Parameters
----------
arg : list, tuple, ndarray, Series, Index
date to be adjusted
origin : 'julian' or Timestamp
origin offset for the arg
unit : str
passed unit from to_datetime, must be 'D'
Returns
-------
ndarray or scalar of adjusted date(s)
"""
if origin == "julian":
original = arg
j0 = Timestamp(0).to_julian_date()
if unit != "D":
raise ValueError("unit must be 'D' for origin='julian'")
try:
arg = arg - j0
except TypeError as err:
raise ValueError(
"incompatible 'arg' type for given 'origin'='julian'"
) from err
# preemptively check this for a nice range
j_max = Timestamp.max.to_julian_date() - j0
j_min = Timestamp.min.to_julian_date() - j0
if np.any(arg > j_max) or np.any(arg < j_min):
raise OutOfBoundsDatetime(
f"{original} is Out of Bounds for origin='julian'"
)
else:
# arg must be numeric
if not (
(is_scalar(arg) and (is_integer(arg) or is_float(arg)))
or is_numeric_dtype(np.asarray(arg))
):
raise ValueError(
f"'{arg}' is not compatible with origin='{origin}'; "
"it must be numeric with a unit specified"
)
# we are going to offset back to unix / epoch time
try:
offset = Timestamp(origin)
except OutOfBoundsDatetime as err:
raise OutOfBoundsDatetime(f"origin {origin} is Out of Bounds") from err
except ValueError as err:
raise ValueError(
f"origin {origin} cannot be converted to a Timestamp"
) from err
if offset.tz is not None:
raise ValueError(f"origin offset {offset} must be tz-naive")
td_offset = offset - Timestamp(0)
# convert the offset to the unit of the arg
# this should be lossless in terms of precision
ioffset = td_offset // Timedelta(1, unit=unit)
# scalars & ndarray-like can handle the addition
if is_list_like(arg) and not isinstance(arg, (ABCSeries, Index, np.ndarray)):
arg = np.asarray(arg)
arg = arg + ioffset
return arg
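# Illustrative sketch (added, not part of pandas): the offset arithmetic performed above
# for a Timestamp origin, spelled out with the public API only. Values assume
# origin='1960-01-01' and unit='D', mirroring the example in the to_datetime docstring.
def _demo_adjust_to_origin():
    import pandas as pd

    td_offset = pd.Timestamp("1960-01-01") - pd.Timestamp(0)  # Timedelta of -3653 days
    ioffset = td_offset // pd.Timedelta(1, unit="D")          # -3653
    shifted = [d + ioffset for d in [1, 2, 3]]                # days relative to the unix epoch
    print(pd.to_datetime(shifted, unit="D"))
    # DatetimeIndex(['1960-01-02', '1960-01-03', '1960-01-04'], dtype='datetime64[ns]', freq=None)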
@overload
def to_datetime(
arg: DatetimeScalar,
errors: DateTimeErrorChoices = ...,
dayfirst: bool = ...,
yearfirst: bool = ...,
utc: bool | None = ...,
format: str | None = ...,
exact: bool = ...,
unit: str | None = ...,
infer_datetime_format: bool = ...,
origin=...,
cache: bool = ...,
) -> Timestamp:
...
@overload
def to_datetime(
arg: Series | DictConvertible,
errors: DateTimeErrorChoices = ...,
dayfirst: bool = ...,
yearfirst: bool = ...,
utc: bool | None = ...,
format: str | None = ...,
exact: bool = ...,
unit: str | None = ...,
infer_datetime_format: bool = ...,
origin=...,
cache: bool = ...,
) -> Series:
...
@overload
def to_datetime(
arg: list | tuple | Index | ArrayLike,
errors: DateTimeErrorChoices = ...,
dayfirst: bool = ...,
yearfirst: bool = ...,
utc: bool | None = ...,
format: str | None = ...,
exact: bool = ...,
unit: str | None = ...,
infer_datetime_format: bool = ...,
origin=...,
cache: bool = ...,
) -> DatetimeIndex:
...
def to_datetime(
arg: DatetimeScalarOrArrayConvertible | DictConvertible,
errors: DateTimeErrorChoices = "raise",
dayfirst: bool = False,
yearfirst: bool = False,
utc: bool | None = None,
format: str | None = None,
exact: bool = True,
unit: str | None = None,
infer_datetime_format: bool = False,
origin="unix",
cache: bool = True,
) -> DatetimeIndex | Series | DatetimeScalar | NaTType | None:
"""
Convert argument to datetime.
This function converts a scalar, array-like, :class:`Series` or
:class:`DataFrame`/dict-like to a pandas datetime object.
Parameters
----------
arg : int, float, str, datetime, list, tuple, 1-d array, Series, DataFrame/dict-like
The object to convert to a datetime. If a :class:`DataFrame` is provided, the
method expects minimally the following columns: :const:`"year"`,
:const:`"month"`, :const:`"day"`.
errors : {'ignore', 'raise', 'coerce'}, default 'raise'
- If :const:`'raise'`, then invalid parsing will raise an exception.
- If :const:`'coerce'`, then invalid parsing will be set as :const:`NaT`.
- If :const:`'ignore'`, then invalid parsing will return the input.
dayfirst : bool, default False
Specify a date parse order if `arg` is str or is list-like.
If :const:`True`, parses dates with the day first, e.g. :const:`"10/11/12"`
is parsed as :const:`2012-11-10`.
.. warning::
``dayfirst=True`` is not strict, but will prefer to parse
with day first. If a delimited date string cannot be parsed in
accordance with the given `dayfirst` option, e.g.
``to_datetime(['31-12-2021'])``, then a warning will be shown.
yearfirst : bool, default False
Specify a date parse order if `arg` is str or is list-like.
- If :const:`True` parses dates with the year first, e.g.
:const:`"10/11/12"` is parsed as :const:`2010-11-12`.
- If both `dayfirst` and `yearfirst` are :const:`True`, `yearfirst` is
takes precedence (same as :mod:`dateutil`).
.. warning::
``yearfirst=True`` is not strict, but will prefer to parse
with year first.
utc : bool, default None
Control timezone-related parsing, localization and conversion.
- If :const:`True`, the function *always* returns a timezone-aware
UTC-localized :class:`Timestamp`, :class:`Series` or
:class:`DatetimeIndex`. To do this, timezone-naive inputs are
*localized* as UTC, while timezone-aware inputs are *converted* to UTC.
- If :const:`False` (default), inputs will not be coerced to UTC.
Timezone-naive inputs will remain naive, while timezone-aware ones
will keep their time offsets. Limitations exist for mixed
offsets (typically, daylight savings), see :ref:`Examples
<to_datetime_tz_examples>` section for details.
See also: pandas general documentation about `timezone conversion and
localization
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html
#time-zone-handling>`_.
format : str, default None
The strftime to parse time, e.g. :const:`"%d/%m/%Y"`. Note that
:const:`"%f"` will parse all the way up to nanoseconds. See
`strftime documentation
<https://docs.python.org/3/library/datetime.html
#strftime-and-strptime-behavior>`_ for more information on choices.
exact : bool, default True
Control how `format` is used:
- If :const:`True`, require an exact `format` match.
- If :const:`False`, allow the `format` to match anywhere in the target
string.
unit : str, default 'ns'
The unit of the arg (D, s, ms, us, ns); an integer or float arg is
interpreted as that many units, counted relative to the origin.
Example, with ``unit='ms'`` and ``origin='unix'``, this would calculate
the number of milliseconds to the unix epoch start.
infer_datetime_format : bool, default False
If :const:`True` and no `format` is given, attempt to infer the format
of the datetime strings based on the first non-NaN element,
and if it can be inferred, switch to a faster method of parsing them.
In some cases this can increase the parsing speed by ~5-10x.
origin : scalar, default 'unix'
Define the reference date. The numeric values would be parsed as number
of units (defined by `unit`) since this reference date.
- If :const:`'unix'` (or POSIX) time; origin is set to 1970-01-01.
- If :const:`'julian'`, unit must be :const:`'D'`, and origin is set to
beginning of Julian Calendar. Julian day number :const:`0` is assigned
to the day starting at noon on January 1, 4713 BC.
- If Timestamp convertible, origin is set to Timestamp identified by
origin.
cache : bool, default True
If :const:`True`, use a cache of unique, converted dates to apply the
datetime conversion. May produce significant speed-up when parsing
duplicate date strings, especially ones with timezone offsets. The cache
is only used when there are at least 50 values. The presence of
out-of-bounds values will render the cache unusable and may slow down
parsing.
.. versionchanged:: 0.25.0
changed default value from :const:`False` to :const:`True`.
Returns
-------
datetime
If parsing succeeded.
Return type depends on input (types in parentheses correspond to
fallback in case of unsuccessful timezone or out-of-range timestamp
parsing):
- scalar: :class:`Timestamp` (or :class:`datetime.datetime`)
- array-like: :class:`DatetimeIndex` (or :class:`Series` with
:class:`object` dtype containing :class:`datetime.datetime`)
- Series: :class:`Series` of :class:`datetime64` dtype (or
:class:`Series` of :class:`object` dtype containing
:class:`datetime.datetime`)
- DataFrame: :class:`Series` of :class:`datetime64` dtype (or
:class:`Series` of :class:`object` dtype containing
:class:`datetime.datetime`)
Raises
------
ParserError
When parsing a date from string fails.
ValueError
When another datetime conversion error happens. For example when one
of 'year', 'month', 'day' columns is missing in a :class:`DataFrame`, or
when a Timezone-aware :class:`datetime.datetime` is found in an array-like
of mixed time offsets, and ``utc=False``.
See Also
--------
DataFrame.astype : Cast argument to a specified dtype.
to_timedelta : Convert argument to timedelta.
convert_dtypes : Convert dtypes.
Notes
-----
Many input types are supported, and lead to different output types:
- **scalars** can be int, float, str, datetime object (from stdlib :mod:`datetime`
module or :mod:`numpy`). They are converted to :class:`Timestamp` when
possible, otherwise they are converted to :class:`datetime.datetime`.
None/NaN/null scalars are converted to :const:`NaT`.
- **array-like** can contain int, float, str, datetime objects. They are
converted to :class:`DatetimeIndex` when possible, otherwise they are
converted to :class:`Index` with :class:`object` dtype, containing
:class:`datetime.datetime`. None/NaN/null entries are converted to
:const:`NaT` in both cases.
- **Series** are converted to :class:`Series` with :class:`datetime64`
dtype when possible, otherwise they are converted to :class:`Series` with
:class:`object` dtype, containing :class:`datetime.datetime`. None/NaN/null
entries are converted to :const:`NaT` in both cases.
- **DataFrame/dict-like** are converted to :class:`Series` with
:class:`datetime64` dtype. For each row a datetime is created from assembling
the various dataframe columns. Column keys can be common abbreviations
like ['year', 'month', 'day', 'minute', 'second', 'ms', 'us', 'ns'] or
plurals of the same.
The following causes are responsible for :class:`datetime.datetime` objects
being returned (possibly inside an :class:`Index` or a :class:`Series` with
:class:`object` dtype) instead of a proper pandas designated type
(:class:`Timestamp`, :class:`DatetimeIndex` or :class:`Series`
with :class:`datetime64` dtype):
- when any input element is before :const:`Timestamp.min` or after
:const:`Timestamp.max`, see `timestamp limitations
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html
#timeseries-timestamp-limits>`_.
- when ``utc=False`` (default) and the input is an array-like or
:class:`Series` containing mixed naive/aware datetime, or aware with mixed
time offsets. Note that this happens in the (quite frequent) situation when
the timezone has a daylight savings policy. In that case you may wish to
use ``utc=True``.
Examples
--------
**Handling various input formats**
Assembling a datetime from multiple columns of a :class:`DataFrame`. The keys
can be common abbreviations like ['year', 'month', 'day', 'minute', 'second',
'ms', 'us', 'ns'] or plurals of the same
>>> df = pd.DataFrame({'year': [2015, 2016],
... 'month': [2, 3],
... 'day': [4, 5]})
>>> pd.to_datetime(df)
0 2015-02-04
1 2016-03-05
dtype: datetime64[ns]
Passing ``infer_datetime_format=True`` can often speed up parsing
if the strings are not exactly in ISO 8601 format but share a regular format.
>>> s = pd.Series(['3/11/2000', '3/12/2000', '3/13/2000'] * 1000)
>>> s.head()
0 3/11/2000
1 3/12/2000
2 3/13/2000
3 3/11/2000
4 3/12/2000
dtype: object
>>> %timeit pd.to_datetime(s, infer_datetime_format=True) # doctest: +SKIP
100 loops, best of 3: 10.4 ms per loop
>>> %timeit pd.to_datetime(s, infer_datetime_format=False) # doctest: +SKIP
1 loop, best of 3: 471 ms per loop
Using a unix epoch time
>>> pd.to_datetime(1490195805, unit='s')
Timestamp('2017-03-22 15:16:45')
>>> pd.to_datetime(1490195805433502912, unit='ns')
Timestamp('2017-03-22 15:16:45.433502912')
.. warning:: For float arg, precision rounding might happen. To prevent
unexpected behavior use a fixed-width exact type.
Using a non-unix epoch origin
>>> pd.to_datetime([1, 2, 3], unit='D',
... origin=pd.Timestamp('1960-01-01'))
DatetimeIndex(['1960-01-02', '1960-01-03', '1960-01-04'],
dtype='datetime64[ns]', freq=None)
**Non-convertible date/times**
If a date does not meet the `timestamp limitations
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html
#timeseries-timestamp-limits>`_, passing ``errors='ignore'``
will return the original input instead of raising any exception.
Passing ``errors='coerce'`` will force an out-of-bounds date to :const:`NaT`,
in addition to forcing non-dates (or non-parseable dates) to :const:`NaT`.
>>> pd.to_datetime('13000101', format='%Y%m%d', errors='ignore')
datetime.datetime(1300, 1, 1, 0, 0)
>>> pd.to_datetime('13000101', format='%Y%m%d', errors='coerce')
NaT
.. _to_datetime_tz_examples:
**Timezones and time offsets**
The default behaviour (``utc=False``) is as follows:
- Timezone-naive inputs are converted to timezone-naive :class:`DatetimeIndex`:
>>> pd.to_datetime(['2018-10-26 12:00', '2018-10-26 13:00:15'])
DatetimeIndex(['2018-10-26 12:00:00', '2018-10-26 13:00:15'],
dtype='datetime64[ns]', freq=None)
- Timezone-aware inputs *with constant time offset* are converted to
timezone-aware :class:`DatetimeIndex`:
>>> pd.to_datetime(['2018-10-26 12:00 -0500', '2018-10-26 13:00 -0500'])
DatetimeIndex(['2018-10-26 12:00:00-05:00', '2018-10-26 13:00:00-05:00'],
dtype='datetime64[ns, pytz.FixedOffset(-300)]', freq=None)
- However, timezone-aware inputs *with mixed time offsets* (for example
issued from a timezone with daylight savings, such as Europe/Paris)
are **not successfully converted** to a :class:`DatetimeIndex`. Instead a
simple :class:`Index` containing :class:`datetime.datetime` objects is
returned:
>>> pd.to_datetime(['2020-10-25 02:00 +0200', '2020-10-25 04:00 +0100'])
Index([2020-10-25 02:00:00+02:00, 2020-10-25 04:00:00+01:00],
dtype='object')
- A mix of timezone-aware and timezone-naive inputs is converted to
a timezone-aware :class:`DatetimeIndex` if the offsets of the timezone-aware
are constant:
>>> from datetime import datetime
>>> pd.to_datetime(["2020-01-01 01:00 -01:00", datetime(2020, 1, 1, 3, 0)])
DatetimeIndex(['2020-01-01 01:00:00-01:00', '2020-01-01 02:00:00-01:00'],
dtype='datetime64[ns, pytz.FixedOffset(-60)]', freq=None)
|
Setting ``utc=True`` solves most of the above issues:
- Timezone-naive inputs are *localized* as UTC
>>> pd.to_datetime(['2018-10-26 12:00', '2018-10-26 13:00'], utc=True)
DatetimeIndex(['2018-10-26 12:00:00+00:00', '2018-10-26 13:00:00+00:00'],
dtype='datetime64[ns, UTC]', freq=None)
- Timezone-aware inputs are *converted* to UTC (the output represents the
exact same datetime, but viewed from the UTC time offset `+00:00`).
>>> pd.to_datetime(['2018-10-26 12:00 -0530', '2018-10-26 12:00 -0500'],
... utc=True)
DatetimeIndex(['2018-10-26 17:30:00+00:00', '2018-10-26 17:00:00+00:00'],
dtype='datetime64[ns, UTC]', freq=None)
- Inputs can contain both naive and aware values, strings or datetimes; the above
rules still apply
>>> from datetime import timezone, timedelta
>>> pd.to_datetime(['2018-10-26 12:00', '2018-10-26 12:00 -0530',
... datetime(2020, 1, 1, 18),
... datetime(2020, 1, 1, 18,
... tzinfo=timezone(-timedelta(hours=1)))],
... utc=True)
DatetimeIndex(['2018-10-26 12:00:00+00:00', '2018-10-26 17:30:00+00:00',
'2020-01-01 18:00:00+00:00', '2020-01-01 19:00:00+00:00'],
dtype='datetime64[ns, UTC]', freq=None)
"""
if arg is None:
return None
if origin != "unix":
arg = _adjust_to_origin(arg, origin, unit)
tz = "utc" if utc else None
convert_listlike = partial(
_convert_listlike_datetimes,
tz=tz,
unit=unit,
dayfirst=dayfirst,
yearfirst=yearfirst,
errors=errors,
exact=exact,
infer_datetime_format=infer_datetime_format,
)
result: Timestamp | NaTType | Series | Index
if isinstance(arg, Timestamp):
result = arg
if tz is not None:
if arg.tz is not None:
result = arg.tz_convert(tz)
else:
result = arg.tz_localize(tz)
elif isinstance(arg, ABCSeries):
cache_array = _maybe_cache(arg, format, cache, convert_listlike)
if not cache_array.empty:
result = arg.map(cache_array)
else:
values = convert_listlike(arg._values, format)
result = arg._constructor(values, index=arg.index, name=arg.name)
elif isinstance(arg, (ABCDataFrame, abc.MutableMapping)):
result = _assemble_from_unit_mappings(arg, errors, tz)
elif isinstance(arg, Index):
cache_array = _maybe_cache(arg, format, cache, convert_listlike)
if not cache_array.empty:
result = _convert_and_box_cache(arg, cache_array, name=arg.name)
else:
result = convert_listlike(arg, format, name=arg.name)
elif is_list_like(arg):
try:
# error: Argument 1 to "_maybe_cache" has incompatible type
# "Union[float, str, datetime, List[Any], Tuple[Any, ...], ExtensionArray,
# ndarray[Any, Any], Series]"; expected "Union[List[Any], Tuple[Any, ...],
# Union[Union[ExtensionArray, ndarray[Any, Any]], Index, Series], Series]"
argc = cast(
Union[list, tuple, ExtensionArray, np.ndarray, "Series", Index], arg
)
cache_array = _maybe_cache(argc, format, cache, convert_listlike)
except OutOfBoundsDatetime:
# caching attempts to create a DatetimeIndex, which may raise
# an OOB. If that's the desired behavior, then just reraise...
if errors == "raise":
raise
# ... otherwise, continue without the cache.
from pandas import Series
cache_array = Series([], dtype=object) # just an empty array
if not cache_array.empty:
result = _convert_and_box_cache(argc, cache_array)
else:
result = convert_listlike(argc, format)
else:
result = convert_listlike(np.array([arg]), format)[0]
if isinstance(arg, bool) and isinstance(result, np.bool_):
result = bool(result) # TODO: avoid this kludge.
# error: Incompatible return value type (got "Union[Timestamp, NaTType,
# Series, Index]", expected "Union[DatetimeIndex, Series, float, str,
# NaTType, None]")
return result # type: ignore[return-value]
# mappings for assembling units
_unit_map = {
"year": "year",
"years": "year",
"month": "month",
"months": "month",
"day": "day",
"days": "day",
"hour": "h",
"hours": "h",
"minute": "m",
"minutes": "m",
"second": "s",
"seconds": "s",
"ms": "ms",
"millisecond": "ms",
"milliseconds": "ms",
"us": "us",
"microsecond": "us",
"microseconds": "us",
"ns": "ns",
"nanosecond": "ns",
"nanoseconds": "ns",
}
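# Illustrative sketch (added): thanks to _unit_map, singular and plural column names
# (and the short ms/us/ns codes) resolve to the same unit when assembling datetimes
# from a DataFrame, so the two frames below produce identical results.
def _demo_unit_map_aliases():
    import pandas as pd

    singular = pd.DataFrame({"year": [2015], "month": [2], "day": [4]})
    plural = pd.DataFrame({"years": [2015], "months": [2], "days": [4]})
    assert pd.to_datetime(singular).equals(pd.to_datetime(plural))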
def _assemble_from_unit_mappings(arg, errors: DateTimeErrorChoices, tz):
"""
assemble the unit specified fields from the arg (DataFrame)
Return a Series for actual parsing
Parameters
----------
arg : DataFrame
errors : {'ignore', 'raise', 'coerce'}, default 'raise'
- If :const:`'raise'`, then invalid parsing will raise an exception
- If :const:`'coerce'`, then invalid parsing will be set as :const:`NaT`
- If :const:`'ignore'`, then invalid parsing will return the input
tz : None or 'utc'
Returns
-------
Series
"""
from pandas import (
DataFrame,
to_numeric,
to_timedelta,
)
arg = DataFrame(arg)
if not arg.columns.is_unique:
raise ValueError("cannot assemble with duplicate keys")
# replace passed unit with _unit_map
def f(value):
if value in _unit_map:
return _unit_map[value]
# m is case significant
if value.lower() in _unit_map:
return _unit_map[value.lower()]
return value
unit = {k: f(k) for k in arg.keys()}
unit_rev = {v: k for k, v in unit.items()}
# we require at least Ymd
required = ["year", "month", "day"]
req = sorted(set(required) - set(unit_rev.keys()))
if len(req):
_required = ",".join(req)
raise ValueError(
"to assemble mappings requires at least that "
f"[year, month, day] be specified: [{_required}] is missing"
)
# keys we don't recognize
excess = sorted(set(unit_rev.keys()) - set(_unit_map.values()))
if len(excess):
_excess = ",".join(excess)
raise ValueError(
f"extra keys have been passed to the datetime assemblage: [{_excess}]"
)
def coerce(values):
# coerce the values to numeric, honouring the errors policy
values = to_numeric(values, errors=errors)
# prevent overflow in case of int8 or int16
if is_integer_dtype(values):
    values = values.astype("int64", copy=False)
return values
import json
import sys
import pandas as pd
from pandas import DataFrame
from db.sql import dal
from flask import request
import tempfile
import tarfile
import csv
import shutil
import subprocess
from flask import send_from_directory
from annotation.main import T2WMLAnnotation
from db.sql.kgtk import import_kgtk_dataframe
from api.variable.delete import VariableDeleter
from api.metadata.main import DatasetMetadataResource, VariableMetadataResource
from api.metadata.metadata import DatasetMetadata
from api.metadata.update import DatasetMetadataUpdater
from annotation.validation.validate_annotation import ValidateAnnotation
from time import time
from datetime import datetime
from typing import Dict, List, Any, Union, NoReturn, Optional, Tuple
import traceback
class AnnotatedData(object):
def __init__(self):
self.ta = T2WMLAnnotation()
self.va = ValidateAnnotation()
self.vmr = VariableMetadataResource()
self.vd = VariableDeleter()
def process(self, dataset, is_request_put=False):
l = time()
validate = request.args.get('validate', 'true').lower() == 'true'
files_only = request.args.get('files_only', 'false').lower() == 'true'
create_if_not_exist = request.args.get('create_if_not_exist', 'false').lower() == 'true'
return_tsv = request.args.get('tsv', 'false').lower() == 'true'
# check if the dataset exists
s = time()
dataset_qnode = dal.get_dataset_id(dataset)
print(f'time take to get dataset: {time() - s} seconds')
if not create_if_not_exist and not dataset_qnode:
print(f'Dataset not defined: {dataset}')
return {'Error': 'Dataset not found: {}'.format(dataset)}, 404
file_name = request.files['file'].filename
t2wml_yaml, metadata_edges = None, None
if 't2wml_yaml' in request.files:
request.files['t2wml_yaml'].seek(0)
t2wml_yaml = str(request.files['t2wml_yaml'].read(), 'utf-8')
if not (file_name.endswith('.xlsx') or file_name.endswith('.csv')):
return {"Error": "Please upload an annotated excel file or a csv file "
"(file name ending with .xlsx or .csv)"}, 400
if file_name.endswith('.xlsx'):
df = pd.read_excel(request.files['file'], dtype=object, header=None)
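# Illustrative sketch (added): the upload handling above reduces to choosing a pandas
# reader by file extension. `upload` and the .csv branch are assumptions for clarity;
# the original method continues beyond this excerpt.
def _read_annotated_upload(upload, file_name):
    if file_name.endswith('.xlsx'):
        return pd.read_excel(upload, dtype=object, header=None)
    # assumed counterpart for the .csv case accepted by the extension check above
    return pd.read_csv(upload, dtype=object, header=None)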
from sklearn.manifold import TSNE
from clustering import silhouette as sil
from data_processing import MulticlusteringExperimentUtils as expUtils
# Keep the clustering experiments that involve outliers here
from clustering.KMeansVariations import kMeans_baseline, kMeans_baseline_high_iteration, kMeans_baseline_random_init, \
kMeans_baseline_4_clusters, kMeans_baseline_3_clusters, kMeans_baseline_2_clusters, kMeans_baseline_2_clusters_low_iter,\
kMeans_baseline_2_clusters_high_iter, kMeans_baseline_highest_iteration, kMeans_baseline_highest_iteration_2_clusters,\
kMeans_baseline_5_clusters, kMeans_baseline_3_clusters_random_high_iter, kMeans_baseline_3_clusters_random_med_iter
from clustering.tsne import makeTSNEPlot
import pandas as pd
# --- Remove all of the outliers for the big features ----
# average hold time
from data_processing.CleanDataUtils import feature_set, feature_set_complete_vectors_only,feature_set_more_even_vectors, feature_set_3_labels_completeSamplesOnly,feature_set_3_labels_AllSamples, feature_set_4049_reduced
from data_processing.dataUtils import getColumnZScores, removeOutliersByZScore
def removeOutliersAndNormalizeData(feature_set_input, threshold):
feature1 = 'avgSeekTime'
feature2 = 'avgHoldTime'
feature3 = 'averageNgramTime'
feature_set_outliers_removed = feature_set_input
feature_set_outliers_removed = getColumnZScores(pd.DataFrame(feature_set_outliers_removed), feature1)
feature_set_outliers_removed = getColumnZScores(pd.DataFrame(feature_set_outliers_removed), feature2)
feature_set_outliers_removed = getColumnZScores(pd.DataFrame(feature_set_outliers_removed), feature3)
feature_set_outliers_removed = removeOutliersByZScore(feature_set_outliers_removed, feature1, threshold)
feature_set_outliers_removed = removeOutliersByZScore(feature_set_outliers_removed, feature2, threshold)
feature_set_outliers_removed = removeOutliersByZScore(feature_set_outliers_removed, feature3, threshold)
feature_set_outliers_removed = expUtils.normalizeLabeledData(pd.DataFrame(feature_set_outliers_removed))
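# Illustrative sketch (added): getColumnZScores/removeOutliersByZScore are project
# helpers not shown here; a generic z-score filter over a single column would look
# roughly like this.
def zscore_filter_sketch(df, column, threshold):
    z = (df[column] - df[column].mean()) / df[column].std()
    return df[z.abs() <= threshold]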
from numpy import *
import pandas as pd
import datetime
from datetime import timedelta
def sum_duplicated():
fields = ['DATE', 'DAY_OFF', 'WEEK_END', 'DAY_WE_DS', 'ASS_ASSIGNMENT', 'CSPL_RECEIVED_CALLS' ] # select the columns to read
x=pd.read_csv("data/train_2011_2012_2013.csv", sep=";", usecols=fields) # read the raw training file
pd.DataFrame(x.groupby(('ASS_ASSIGNMENT', 'DATE', 'WEEK_END', 'DAY_WE_DS'), squeeze =False).sum()).to_csv("data/trainPure.csv", sep=';', encoding='utf_8')
def preprocessTOTAL(selectAss):
fields = ['DATE', 'DAY_OFF', 'WEEK_END', 'DAY_WE_DS', 'ASS_ASSIGNMENT', 'CSPL_RECEIVED_CALLS' ] # select the columns to read
x=pd.read_csv("data/trainPure.csv", sep=";", usecols=fields) # read the aggregated training file
################################################# For X
if(selectAss != False): # keep only the selected assignment
x = x[x['ASS_ASSIGNMENT'] == selectAss]
del x['ASS_ASSIGNMENT']
x['YEAR'] = x['DATE'].str[0:4]
x['MONTH'] = x['DATE'].str[5:7]
x['DAY'] = x['DATE'].str[8:10]
x['HOUR'] = x['DATE'].str[-12:-8]
x['DATE'] = pd.to_datetime(x['DAY']+'/'+x['MONTH']+'/'+x['YEAR'])
############## get the call volume from 7 days earlier into 's7'
tmp = pd.DataFrame()
tmp['HOUR'] = x['HOUR']
tmp['DATE'] = x['DATE']- timedelta(days=7)
#tmp.join(x[['DATE','HOUR', 'CSPL_RECEIVED_CALLS' ]], on=['DATE','HOUR'], how='left')
tmp[['DATE','HOUR', 's7' ]]=pd.merge(tmp[['DATE','HOUR']],x[['DATE','HOUR', 'CSPL_RECEIVED_CALLS' ]], on=['HOUR', 'DATE'], how='left')
x=pd.concat([x, tmp['s7']], axis=1)
x['s7'][pd.isnull(x['s7'])]=x['CSPL_RECEIVED_CALLS'][pd.isnull(x['s7'])]
file = ['joursFeries', 'vacances']
for f in file:
jf =pd.read_csv("data/"+f+".csv", sep=";")
for n in list(jf):
x[n]= x['DATE'].apply(lambda x: x.strftime('%d/%m/%Y')).isin(jf[n])
####################################################### for xTest
xTest=pd.read_csv("data/submission.txt", sep="\t") # read the submission template
del xTest['prediction']
souvenir = xTest.copy()
if(selectAss != False):
xTest = xTest[xTest['ASS_ASSIGNMENT'] == selectAss]
souvenir = souvenir[souvenir['ASS_ASSIGNMENT'] == selectAss]
del xTest['ASS_ASSIGNMENT']
xTest['YEAR'] = xTest['DATE'].str[0:4]
xTest['MONTH'] = xTest['DATE'].str[5:7]
xTest['DAY'] = xTest['DATE'].str[8:10]
xTest['HOUR'] = xTest['DATE'].str[-12:-8]
xTest['DATE'] = pd.to_datetime(xTest['DAY']+'/'+xTest['MONTH']+'/'+xTest['YEAR'])
tmp = pd.DataFrame()
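# Illustrative sketch (added): the 's7' feature above is a 7-day lag built by merging
# the frame with a copy whose DATE is shifted back one week. Column names follow the
# snippet; it assumes (DATE, HOUR) pairs are unique in x, as they are after the
# groupby-sum in sum_duplicated() for a single ASS_ASSIGNMENT.
def add_seven_day_lag(x):
    lag = x[['DATE', 'HOUR']].copy()
    lag['DATE'] = lag['DATE'] - timedelta(days=7)
    lag = lag.merge(x[['DATE', 'HOUR', 'CSPL_RECEIVED_CALLS']], on=['DATE', 'HOUR'], how='left')
    out = x.copy()
    out['s7'] = lag['CSPL_RECEIVED_CALLS'].values
    out['s7'] = out['s7'].fillna(out['CSPL_RECEIVED_CALLS'])
    return out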
#%%
# ANCHOR IMPORTS
import sys
import pandas as pd, numpy as np
import pickle
import re
from sklearn import feature_extraction , feature_selection
from scipy.sparse import csr_matrix, hstack
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction import DictVectorizer
from sklearn.preprocessing import Normalizer
from tqdm.autonotebook import trange, tqdm
import swifter
# Libraries for feature engineering.
import string
from collections import Counter # not necessary?
#from nnsplit import NNSplit
import spacy# .tokenizer.tokenize
from spellchecker import SpellChecker
# Other neat features.
from nltk.metrics.distance import edit_distance
from lexicalrichness import LexicalRichness
import syllables
import itertools
import textstat
# Stats
from scipy.stats import chisquare
#from statistics import mean
#%% Get spacy docs and save them to data to speed up development.
def get_docs(data, text_col='text_clean'):
nlp = spacy.load('en_core_web_sm')
nlp.enable_pipe("senter")
data['docs'] = data[text_col].apply(lambda x: nlp(x))
#%%
def listify(series, feature_name=str):
return [{feature_name: x[1]} for x in series.items()]
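# Illustrative sketch (added): listify turns a Series of per-tweet values into the
# list-of-dicts shape that sklearn's DictVectorizer expects.
def _demo_listify():
    s = pd.Series([0.1, 0.25])
    assert listify(s, feature_name='punct_freq') == [{'punct_freq': 0.1}, {'punct_freq': 0.25}]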
#%%
# Extract Baseline feature
# Character trigrams (morphological/lexical/semantic?).
def ngrams(train, test, params):
"""Extract character ngrams.
Args:
train (list): list of texts to fit the vectorizer.
test (list): list of texts to transform to feature space.
params (dict): parameters for the vectorizer construction
Returns:
tuple: the fitted CountVectorizer, the train feature matrix and the test feature matrix.
"""
vectorizer = CountVectorizer(lowercase=params['ngrams']['lowercase'],
ngram_range=params['ngrams']['size'], # experiment with ranges, e.g. ngram_range=(3,3)
analyzer=params['ngrams']['type'], #, also try "char_wb"
max_features=params['ngrams']['max_vocab']) # max_features=10000
# fit count vecotorizer to preprocessed tweets.
#vectorizer.fit(train)
# Transform into input vectors for train and test data.
train_vectors = vectorizer.fit_transform(train) # using fit_transform due to better implementation.
#train_vectors = vectorizer.transform(train) #.toarray()
test_vectors = vectorizer.transform(test) #.toarray()
# Inspect with vectorizer.get_feature_names() and .toarray()
#inverse = vectorizer.inverse_transform(train)
#feature_names = vectorizer.get_feature_names()
#print(f'Train ({type(train_vectors)}) feature matrix has shape: {train_vectors.shape}')
#print(f'Test ({type(test_vectors)}) feature matrix has shape: {test_vectors.shape}')
#return vectorizer
return vectorizer, train_vectors , test_vectors
#return inverse
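# Illustrative sketch (added): a minimal call to ngrams(). The params dict follows the
# keys read inside the function above; the texts are placeholders.
def _demo_ngrams():
    params = {'ngrams': {'lowercase': True, 'size': (2, 3), 'type': 'char', 'max_vocab': 5000}}
    train_texts = ["stay home stay safe", "wash your hands"]
    test_texts = ["stay safe out there"]
    cv, X_train, X_test = ngrams(train_texts, test_texts, params)
    print(X_train.shape, X_test.shape)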
#%% ANCHOR EXTRACT LIWC
def parse_liwc(file, **args):
"""Parse a (left) aligned version of the LIWC lexicon.
Args:
file (str): filepath to lexcion (excel).
Returns:
DataFrame: df or dict
"""
df = pd.read_excel(file, skiprows=2)
# Handling merged columns in file
### Adapted from https://stackoverflow.com/a/64179518 ###
df.columns = df.columns.to_series()\
.replace('Unnamed:\s\d+', np.nan, regex=True).ffill().values
# Multindex to represent multiple columns for some categories.
df.columns = pd.MultiIndex.from_tuples([(x, y)for x, y in
zip(df.columns, df.columns.to_series().groupby(level=0).cumcount())])
### Accessed 26-04-2021 ###
# d = data.to_dict(orient='list')
### Adapted from https://stackoverflow.com/a/50082926
# dm = data.melt()
# data = dm.set_index(['variable', dm.groupby('variable').cumcount()]).sort_index()['value'].unstack(0)
### Accessed 26-04-2021 ###
# Concat the terms by column.
# d = dict()
#d = {column: value for key, value in dd.items()}
# for ki, wl in dd.items():
# nl = []
# k, i = ki
# # for w in wl:
# # if w not in nl:
# # d[k].append(wl)
# if k in d:
# d[k].append(wl)
# else:
# d[k] = wl
### Solution from https://stackoverflow.com/a/48298420 ###
# TODO experiment with not sorting the index? or reesrorting columns to mach the multiindex or just original df.columns.
df = df.stack().sort_index(level=1).reset_index(drop=True)
### Accessed 26-04-2021 ###
# Check that merged columns have the right number of terms.
# sum(isinstance(x, str) for x in terms['Funct'])
return df.to_dict(orient='list')
#%%
# Extract LIWC matches (lexical/semantic)
def liwc_match(parsed, d, extract=False, text_col='text_clean'):
"""Search a corpus for matches against LIWC (2007) categories.
Args:
parsed (DataFrame): a pandas df with the all categories of LIWC prepared.
d (str): a filepath to a pickle file with a corpus to search.
extract (bool, optional): Switch specifying whether or not to return a Dict for feature extraction or feature inspection/analysis. Defaults to False.
Returns:
dict: a dict with {liwc_cat1...n : count} for each datapoint in the corpus OR a dict a, a dataFrame and a Series with results of searching the categories against the matches (absolute counts per datapoint (as dict and DF) totals per category (Series)).
"""
# load data to search.
# Could do Series.count(regex) or df[clean_text] -> (joined) list?
if isinstance(d, pd.DataFrame) == False: # the ... analysis case.
data = pd.read_pickle(d)
text = data[text_col] # use the text column of the loaded corpus
if extract == True: # The extract case
data = d
text = data[text_col]
# Dict for search results.
results = dict()
pats = dict() # save patterns to dict for debugging.
# Loop through category-termlist pairs.
for cat, terms in tqdm(parsed.items()):
# Remove nans from term lists.
terms = [term.strip(' ') for term in terms if isinstance(term, str)]
# Compile re pattern from term list.
#pat = re.compile('|'.join(terms), flags=re.MULTILINE)
#pat = re.compile('|'.join(
# [r'\b' + t[:-1] if t.endswith('*') else r'\b' + t + r'\b' for t in #terms]))
### Adapted from https://stackoverflow.com/a/65140193 ###
pat = re.compile('|'.join([r'\b' + t[:-1] + r'\w*' if t.endswith('*') else r'\b' + t + r'\b' for t in terms]) , flags=re.MULTILINE | re.IGNORECASE)
### Accessed 27-04-2021 ###
pats[cat] = pat
#i, char = enumerate(j_terms)
# for term in terms:
# i = 0
# try:
# pat = re.compile(term)
# #print(pat, counter,'\n')
# i +=1
# except:
# print('error here:\n'.upper(),pat, i)
# Aggregate matches per category into dict. storing tweet id's preserved in the source data.
#results[cat] = pat.finditer(text.values)
# For that, join values into list of lists -> re.match -> see below
# results[cat][re.match(pat)] = re.finditer(pat, row_list)
# if extract == True: You can't normalize since this isn't tokenized.
# results[cat] = text.apply(lambda x: x.str.count(pat) / len(x))
# else:
results[cat] = text.str.count(pat)
#results[cat] = text.swifter.apply(lambda x: re.finditer(pat, x))
# Store results in DataFrame
df_results = pd.DataFrame.from_dict(results)
# Totals per category
df_totals = df_results.sum().sort_values(ascending=False)
if extract == True:
# Export results to {index : {cat : count}...} for easy vectorization.
results_per_row = df_results.to_dict(orient='records') # or orient='index'? -> DictVectorizer
return results_per_row
return {'results' :
{'matches_dict' : results,
'matches_df' : df_results,
'matches_total': df_totals
},
'regex_pats' : pats
}
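# Illustrative sketch (added): a typical extraction pass over a corpus DataFrame that
# has a 'text_clean' column. The lexicon path is a placeholder; liwc_match(...,
# extract=True) returns one {category: count} dict per row, which DictVectorizer can
# turn into a feature matrix.
def _demo_liwc_features(data):
    parsed = parse_liwc('data/LIWC2007.xlsx')  # placeholder path
    per_row = liwc_match(parsed, data, extract=True)
    vec = DictVectorizer(sparse=True)
    return vec, vec.fit_transform(per_row)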
#%%
def norm_freqs(data, expression, count_name=str, normalize=True, analyze=True):
"""Get frequencies (normalized = optional) of a regex pattern in a Series with one or more strings.
Args:
data (DataFrame): a dataframe with texts to extract frequencies from.
expression (re.compile): a regex pattern to count occurrences of in each text.
count_name (str, optional): a name for the counted feature. Defaults to str.
normalize (bool, optional): [description]. Defaults to True.
Returns:
list: list of dicts with key = frequency name, value = frequency.
"""
# List to store frequencies
# freqList = list()
# Loop through each entry in the list of strings.
# for e in stringList:
# # Join to a regular string
# text = ' '.join(e)
# # Construct a dict for each entry with freuncies.
# c = {count_name : len([char for char in text if char in expression])}
# Get frequencies of a regex in a pandas column, normalize if set to True.
c = data.apply(lambda x: len(re.findall(
expression, x))/len(x) if normalize == True else len(re.findall(expression, x)))
### Adapted from https://stackoverflow.com/a/45452966 ###
# Cast frequencies Series to list of dicts.
cList = [{count_name: x[1]} for x in c.items()]
### Accessed 10-05-2021 ###
if analyze == True:
return cList
else:
return c
def binary_freq(data, expression, feature_name=str, analyze=True):
"""Search data for occurrences of a binary feature as a regex.
Args:
data (pd.Series): a series with text instances.
expression (re.compile): a regex or string to search for.
feature_name (str, optional): a name for the feature to extract. Defaults to str.
Returns:
list: a list with a dict mapping feature name to 1 or 0 (true/false) based on occurrence in texts.
"""
b = data.str.contains(expression).astype(int) # cast bools to 0/1
if analyze == True:
bList = [{feature_name: x[1]} for x in b.items()]
return bList
else:
return b
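# Illustrative sketch (added): how the two helpers above are typically called with
# compiled patterns; the column name follows the rest of this module.
def _demo_regex_feature_helpers():
    df = pd.DataFrame({'text_clean': ["so cool!!!", "really? ok 123"]})
    digit_pat = re.compile(r'\d', re.M)
    excl_pat = re.compile(r'!{2,}', re.M)
    digit_freqs = norm_freqs(df['text_clean'], digit_pat, count_name='digit_freq', analyze=True)
    has_repeated_excl = binary_freq(df['text_clean'], excl_pat, feature_name='excl_rep', analyze=False)
    print(digit_freqs)                 # normalized digit frequencies, one dict per row
    print(has_repeated_excl.tolist())  # [1, 0]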
#%% ANCHOR extract character and word level features
# Extract character-level features (lexical/morphological).
def get_cl(data, text_col='text_clean', analyze=True):
# 0. Cast data text col .to_list()
# 1. Normalized punctation frequency.
# # Using pandas instead of lists + counter + dicts.
# df_results = pd.DataFrame({'text': textList})
# #p_pat = re.compile(r'[!"\$%&\'()*+,\-.\/:;=#@?\[\\\]^_`{|}~]*')
# p_pat = re.compile(re.escape(string.punctuation))
# df_results['punct'] = df_results.text.str.count(p_pat)
# the whole series
#train['text_clean'].str.count(p_pat)
df_punc_freq = data[text_col].apply(lambda x: len([char for char in ' '.join(x) if char in string.punctuation]) / len(' '.join(x)))
#return punc_freq, df_punc_freq
#df_punc_freq = pd.DataFrame.from_records(punc_freq)
# Add to cl dict.
#cl_results['punc_freq'] = punc_freq
#2. Specific characters (also normalized)
# 2.1 digits
d_pat = re.compile(r'\d' , re.M)
df_digits = norm_freqs(data[text_col], d_pat, count_name='digit_freq',normalize=True, analyze=False)
#return df_digits
# 2.2 Whitespace chars.
ws_pat = re.compile(r' ', re.M) # NOTE just using actual whitespace instead of \s
df_whitespaces = norm_freqs(data[text_col], ws_pat, count_name='whitespace_freq', normalize=True, analyze=False)
# 2.3 tab characters NOTE Doesn't occur in either corpus.
# tab_pat = re.compile(r'\t', re.M)
# tabs = norm_freqs(data[text_col], tab_pat, count_name='tab_freqs', normalize=True)
# 2.4 line break characters
br_pat = re.compile(r'[\r\n\f]', re.M)
df_lbreaks = norm_freqs(data[text_col], br_pat, count_name='line_break_freq', normalize=True, analyze=False)
# 2.5 Upperchase chars (per all chars)
up_pat = re.compile(r'[A-Z]', re.M) # Decide whether to be greedy about *all* uppercase chars or to be lazy (below). Also, @USER mentions are counted now. Can be excluded with \b(?!USER\b)[A-Z]. Try doing [^a-z\W] - caret negates the range of chars.
#up_pat = re.compile(r'(?<![a-z])*[A-Z](?![a-z])*' , re.M) # Only count chars if they are not a one-off in the beginning of words.
df_upchars = norm_freqs(data[text_col], up_pat, count_name= 'upper_char_freq', normalize=True, analyze=False)
# 2.6 Special chars other than punctuation. NOTE Doesn't make much sense when using a full punctuaion set..
spc_pat = re.compile(r"[^a-z \.,!?':;\s]", re.M)
df_spc = norm_freqs(data[text_col], spc_pat, count_name="special_characters", analyze=False)
#3. Repeated characters (binary features) # NOTE if you want counts of each repeated char, consider just defining it with regexes and then using norm_freqs, normalize=False?
# 3.1 question marks
quest_pat = re.compile(r'\?{2,}', re.M)
df_rep_quest = binary_freq(data[text_col] , quest_pat, feature_name='quest_rep', analyze=False)
# 3.2 periods (ellipsis)
per_pat = re.compile(r'\.{2,}', re.M)
df_rep_per = binary_freq(data[text_col] , per_pat, feature_name='period_rep', analyze=False)
# 3.3 exclamation marks
excl_pat = re.compile(r'!{2,}', re.M)
df_rep_excl = binary_freq(data[text_col] , excl_pat, feature_name='excl_rep', analyze=False)
# 4 Contains equal signs
eq_pat = re.compile(r'=', re.M)
df_equals = binary_freq(data[text_col] , eq_pat , feature_name='equals', analyze=False)
# 5 Quotes in chars
#quotes = data[text_col].apply(lambda x: len(re.findall(quot_pat, x)) / len(x)) # per character --- works.
#quotes_char = [{'quotes' : x[1]} for x in qoutes.items()]
if analyze == True:
#punc_freq = listify(df_punc_freq, feature_name='char_punc_freq') # new Alternative to punc_freq with dict comprehension.
textList = data[text_col].to_list()
### Old approach to punc_freqs for analysis.
cl_results = dict() # dict to store results.
punc_freq = list()
for e in textList:
text = ' '.join(e)
# Build dict with counts of all punct characters.
# The first c example does it per punctuation character, the second for all.
# Each count is normalized by total number of chars in the each string.
# NOTE not using regexes here. Single quotes/apostrophes/contractions are counted as well.
#c = {char:count/len(text) for char, count in Counter(text).items() #if char in string.punctuation}
# This should generalize to regex matches.
c = {'char_punc_freq': len([char for char in text if char in string.punctuation])/len(text)}
punc_freq.append(c)
digits = norm_freqs(data[text_col], d_pat, count_name='digit_freq',normalize=True)
whitespaces = norm_freqs(data[text_col], ws_pat, count_name='whitespace_freq', normalize=True)
lbreaks = norm_freqs(data[text_col], br_pat, count_name='line_break_freq', normalize=True)
upchars = norm_freqs(data[text_col], up_pat, count_name= 'upper_char_freq', normalize=True)
spc = norm_freqs(data[text_col], spc_pat, count_name="special_characters")
rep_quest = binary_freq(data[text_col] , quest_pat, feature_name='quest_rep')
rep_per = binary_freq(data[text_col] , per_pat, feature_name='period_rep')
rep_excl = binary_freq(data[text_col] , excl_pat, feature_name='excl_rep')
equals = binary_freq(data[text_col] , eq_pat , feature_name='equals')
# Store results
cl_results['char_punc_freq'] = punc_freq
cl_results['digit_freq'] = digits
cl_results['whitespace_freq'] = whitespaces
#cl_results['tab_freq'] = tabs does not occur in either corpus.
cl_results['linebreak_freq'] = lbreaks
cl_results['uppercased_char_freq'] = upchars
cl_results['special_char_freq'] = spc
cl_results['repeated_questionmark'] = rep_quest
cl_results['repeated_periods'] = rep_per
cl_results['repeated_exclamation'] = rep_excl
cl_results['contains_equals'] = equals
return cl_results #punc_freq # (punc_freq , cl_results)
# Store results as df for much easier vectorization...
else:
cl_results_df = pd.DataFrame()
cl_results_df['char_punc_freq'] = df_punc_freq #✅
#pd.concat(cl_results_df)
# Store results
cl_results_df['digit_freq'] = df_digits #✅
cl_results_df['whitespace_freq'] = df_whitespaces #✅
#cl_results['tab_freq'] = tabs does not occur in either corpus.
cl_results_df['linebreak_freq'] = df_lbreaks #✅
cl_results_df['uppercased_char_freq'] = df_upchars #✅
cl_results_df['special_char_freq'] = df_spc #✅
cl_results_df['repeated_questionmark'] = df_rep_quest #✅
cl_results_df['repeated_periods'] = df_rep_per #✅
cl_results_df['repeated_exclamation'] = df_rep_excl #✅
cl_results_df['contains_equals'] = df_equals #✅
return cl_results_df
#%%
# Debugging
# test_df = train.iloc[:50,:]
# test = get_cl(test_df, text_col='text_clean', analyze=False)
# Extract word-level features (lexical/morphological)
def get_wl(data, text_col='text_clean', analyze=False, docs=[]):
# SpaCy pipe for rule based sentence splitting.
#blank_nlp = spacy.blank('en') # spacy.load('en_core_web_sm')
# sentencizer = blank_nlp.add_pipe("sentencizer")
# morphologizer = blank_nlp.add_pipe('morphologizer')
# blank_nlp.initialize() #
# print(nlp.pipe_names)
print('Configuring spacy for word level')
nlp = spacy.load('en_core_web_sm', disable=["lemmatizer", 'ner'])
# disable parser in favor of senter and sentencizer due to speed https://spacy.io/models
nlp.disable_pipe("parser")
nlp.enable_pipe("senter")
# Load spellchecker
spell = SpellChecker()
# load exceptions to spellchecker (Twitter, covid specifc)
try:
spell.word_frequency.load_text_file('./utils/spell_additions.txt')
except:
pass
# 1 Get lengths (total/avg words, sentence)
# rewrite features as attributes of Lengths objects?
# class Lengths:
# def __init__(self, first_feat, second_feat):
# pass
#textList = data[text_col].to_list()
wl_results = dict()
# print('TOKENIZING WORD-LEVEL FEATURES')
# data to docs
if len(docs) <= 0:
docs = data[text_col].swifter.apply(lambda x: nlp(x))
#assert len(docs) == len(data[text_col])
# get list of sentences.
sents_c = docs.apply(lambda x: [s for s in x.sents])
# Words only (including numbers and @mentions)
sents_w = docs.apply(lambda x: [[t.text for t in s if\
t.is_punct == False and
t.is_space == False]\
for s in x.sents])
# list of *word* tokens in entire tweet.
toks = docs.apply(lambda x: [t.text for t in x if t.is_punct == False and\
t.is_space == False]) # could have used data['tokens_clean]
# alphabetic tokens only. (for spell checking)
toks_alpha = docs.apply(lambda x: [t.text for t in x if t.is_alpha == True])
# Debugging getting empty lists of alphabetic tokens.
#return pd.DataFrame({'tokens' : toks, 'alpha_tokens': toks_alpha})
toks_morph = docs.apply( lambda x: [t for t in x if t.is_alpha == True])
# print('\n GETTING WORD-LEVEL FEATURES')
# 1.1 total length of tweet in words
# c = {'total_words' : int}
# for doc in docs:
w_total_series = toks.map(len)
# 1.2 avg word length
awl = toks.apply(lambda x: sum(len(w) for w in x) / len(x))
# build dict with keys from list contained in feature_params value for lexical features > word_level. Check if they are there and populate them with the dicts below accordingly. Else don't.
# 1.3.1 avg sentence length (words)
asl_w = sents_w.apply(lambda x: sum(len(s) for s in x) / len(x))
# 1.3.2 avg sentence length (characters)
#asl_c = apply(lambda x: sum([len(''.join(s.text)) for s in x]))
asl_c = sents_c.apply(lambda x: sum(len(''.join(s.text)) for s in x) / len(x))
# 2.1 number of uppercased words.
uws = toks_alpha.apply(lambda x: len([t for t in x if t.isupper() == True]) / len(x) if len(x) > 0 else 0.0)
# 2.2 number of short words
# use len of token <=3
sws = toks_alpha.apply(lambda x: len([t for t in x if len(t) <=3]) / len(x) if len(x) > 0 else 0.0)
# 2.3 number of elongated words
# use regex \b\w{3,}\b
elw_pat = re.compile(r'(\w)\1{2,}', re.M)
elws = toks_alpha.apply(lambda x: len([t for t in x if elw_pat.search(t)]) / len(x) if len(x) > 0 else 0.0)
# 2.4 number of number-like tokens (both digits and numerals)
nss = docs.apply(lambda x: len([t for t in x if t.like_num == True]) / len(x))
# 2.5 frequency of specific verb tenses
pst = toks_morph.apply(lambda x: [t.morph for t in x if t.morph.get('Tense') == ['Past']]).map(len).divide(toks_alpha.map(len))
prs = toks_morph.apply(lambda x: [t.morph for t in x if t.morph.get('Tense') == ['Pres']]).map(len).divide(toks_alpha.map(len)) #NOTE using series.divide instead for if/else check with regular might give a problem with vectorizers.
adj_pos = toks_morph.apply(lambda x: [t.morph for t in x if t.morph.get('Degree') == ['Pos']]).map(len).divide(toks_alpha.map(len))
adj_c_s = toks_morph.apply(lambda x: [t.morph for t in x if t.morph.get('Degree') == ['Cmp'] or t.morph.get('Degree') == ['Sup']]).map(len).divide(toks_alpha.map(len))
# Here you could add future tense, mood etc.
# 2.6 Frequency of OOV words (according to spaCy model)
# token.is_oov
# 3. Frequencies of emotes/jis.
e = data['emotes'].apply(lambda x: len(x[0] + x[1])).divide(toks.map(len)) # normalized by tokens.
# 4. Non-standard spelling. Reconsider including this. It mostly captures proper names and acronyms if it has to be this fast.
sc = toks_alpha.apply(lambda x: spell.unknown(x)).map(len).divide(toks_alpha.map(len))
# 5. number of quoted words
# NOTE normalized by words (in match / in tweet)
quot_pat = re.compile(r"(\".+?\"|\B'.+?'\B)") # should this be quot_pat = re.compile(r("\".+?\"|\B'.+?'\B")) #
#quotes = data[text_col].apply(lambda x: re.findall(quot_pat, x).split(' ')).map(len).divide(toks_alpha.map(len)) # per word (split on whitespace).
print('Tokenizing quote spans')
quotes = data[text_col].swifter.apply(lambda x:
[t for t in nlp(' '.join(re.findall(quot_pat, x))) if t.text.isalnum()]).map(len).divide(toks.map(len))
#return pd.DataFrame({'org_text': data[text_col],'alpha_toks': toks_alpha, 'quoted_toks' : quotes, 'quoted_lens' : quotes_lens})
#quotes = data[text_col].apply(lambda x: re.findall(quot_pat, x)).map(len).divide(toks_alpha.map(len)) # not finished. need to tokenize matches.
#quotes = sents_c.apply(lambda x: len([re.findall(quot_pat, s) for s in x]) / len(x))# per sentence - doesn't work.
# 6. Vocab richness/complexity
# 6.1 Type-token ratio.
tt = toks_alpha.apply(lambda x: len(set(x)) / len(x) if len(x) > 0 else 0.0) # could use Counter instead of set()
# 6.2.1 Hapax legomena
### Adapted from https://stackoverflow.com/a/1801676 ###
hlg = toks_alpha.apply(lambda x: len([word for word, count in Counter(map(str.lower, x)).items() if count == 1]) / len(x) if len(x) > 0 else 0.0) # could also lower with list comprehension.
### accessed 13-05-2021 ###
# 6.2.2 Hapax dislegomena (words that occur twice only)
hdlg = toks_alpha.apply(lambda x: len([word for word, count in Counter(map(str.lower, x)).items() if count == 2]) / len(x) if len(x) > 0 else 0.0)
# Here you would implement complexity measures
#- Brunet's W Measure
#- Yule's K Characteristic
#- Honore's R Measure
#- Sichel's S Measure
#- Simpson's Diversity Index
# 7. syllable frequencies #NOTE this is averaged/normalized syllable frequncies. NOTE the syllables docs suggest using cmudict for accuracy over speed.
sfr = toks_alpha.apply(lambda x: sum([syllables.estimate(w) for w in x]) / len(x) if len(x) > 0 else 0.0) # could also use statistics.mean for all of these averages..
# 8. Readability
# Flesch-Kincaid reading ease
fk = data[text_col].apply(lambda x: textstat.flesch_reading_ease(x))
# # 8.1 Automated Readability Index
# ari = data[text_col].swifter.apply(lambda x: textstat.automated_readability_index(x))
# r_ari = listify(ari, feature_name='automated_readability_index')
# # 8.2 Coleman-Liau index
# cli = data[text_col].swifter.apply(lambda x: textstat.coleman_liau_index(x))
# r_cli = listify(cli, feature_name='coleman_liau_index')
# # 8.3 Dale Chall Readability Index
# dci = data[text_col].swifter.apply(lambda x: textstat.dale_chall_readability_score(x))
# r_dci = listify(dci, feature_name='dale_chall_index')
# # 8.4 Gunning Fog Index
# gfi = data[text_col].swifter.apply(lambda x: textstat.gunning_fog(x))
# r_gfi = listify(gfi, feature_name='gunning_fog_index')
# 8.5 Consensus based on all tests in textstat.
# consensus = data[text_col].swifter.apply(lambda x: textstat.text_standard(x, float_output=True))
# r_consensus = listify(consensus, feature_name='readability_consensus_score')
# Could add basic sentiment with doc.token.sentiment?
# Store results TODO store each list of dicts in separate dict on the same level.
# wl_results = {
# {'length_features' : w_total, w_len_avg, asl_w, asl_c},
# {'specific_w_frequencies' : upper_ws, shortws, elongws, nums, past_freq, pres_freq, adj_positives, adj_cmp_sup ,ems},
# {'nonstandard_spelling' : s_check},
# {'words_in_quotes' : quot_ws},
# {'richess/complexity' : ttr, hlgs, hldgs},
# {'syllable frequencies' : syl_freq},
# {'readability' : r_fk, r_ari, r_cli, r_dci, r_gfi, r_consensus}
# }
# print('\nSTORING RESULTS')
# print('DONE')
if analyze == True:
w_total = [{'len_total_words': x[1]} for x in toks.map(len).items()]
w_len_avg = [{'avg_word_length' : x[1]} for x in awl.items()]
asl_w_avg = [{'avg_sent_len_words': x[1]} for x in asl_w.items()]
asl_c_avg = [{'avg_sent_len_chars' : x[1]} for x in asl_c.items()] # move this to character level.
upper_ws = [{'upper_words': x[1]} for x in uws.items()]
shortws = [{'short_words': x[1]} for x in sws.items()]
elongws = [{'elongated_words' : x[1]} for x in elws.items()]
nums = listify(nss, feature_name='numerical_tokens_frequency')
past_freq = listify(pst, feature_name = 'past_tense_frequency')
pres_freq = listify(prs, feature_name='present_tense_frequency')
adj_positives = listify(adj_pos, feature_name='positive_adjectives')
adj_cmp_sup = listify(adj_c_s, feature_name='comp_and_sup_adjectives')
ems = [{'emote_frequencies': x[1]} for x in e.items()]
s_check = [{'nonstandard_words': x[1]} for x in sc.items()]
quot_ws = listify(quotes, feature_name = 'quotes_in_words')
ttr = [{'type-token_ratio': x[1]} for x in tt.items()]
hlgs = listify(hlg, feature_name= 'hapax_legomena')
hdlgs = listify(hdlg, feature_name='hapax_dislegomena')
syl_freq = [{'avg_syllable_freq': x[1]} for x in sfr.items()]
r_flk = [{'flesch_kincaid_reading_ease' : x[1]} for x in fk.items()]
# Store results in dict.
wl_results['total_word_len'] = w_total
wl_results['avg_word_len'] = w_len_avg
wl_results['avg_sentence_len_words'] = asl_w_avg
wl_results['avg_sentence_len_chars'] = asl_c_avg
wl_results['uppercased_words'] = upper_ws
wl_results['short_words'] = shortws
wl_results['elongated_words'] = elongws
wl_results['numberlike_tokens'] = nums
wl_results['past_tense_words'] = past_freq
wl_results['present_tense_words'] = pres_freq
wl_results['positive_adjectives'] = adj_positives
wl_results['comp_and_sup_adjectives'] = adj_cmp_sup
wl_results['emotes'] = ems
wl_results['nonstandard_spelling'] = s_check # exclude?
wl_results['quoted_words'] = quot_ws
wl_results['type_token_ratio'] = ttr
wl_results['hapax_legomena'] = hlgs
wl_results['hapax_dislegomena'] = hdlgs
wl_results['syllable_freqs'] = syl_freq #takes too long?
wl_results['readability_flesch_kincaid'] = r_flk
# wl_results['readability_ari'] = r_ari
# wl_results['readability_coleman_liau'] = r_cli
# wl_results['readability_dale_chall'] = r_dci
# wl_results['readability_gunning_fog'] = r_gfi
#wl_results['readability_consensus'] = r_consensus
return wl_results
else:
# Build dataframe
wl_results_df = pd.DataFrame()
wl_results_df['total_word_len'] = w_total_series #✅
wl_results_df['avg_word_len'] = awl #✅
wl_results_df['avg_sentence_len_words'] = asl_w #✅
wl_results_df['avg_sentence_len_chars'] = asl_c #✅
wl_results_df['uppercased_words'] = uws #✅
wl_results_df['short_words'] = sws #✅
wl_results_df['elongated_words'] = elws #✅
wl_results_df['numberlike_tokens'] = nss #✅
wl_results_df['past_tense_words'] = pst #✅
wl_results_df['present_tense_words'] = prs #✅
wl_results_df['positive_adjectives'] = adj_pos #✅
wl_results_df['comp_and_sup_adjectives'] = adj_c_s #✅
wl_results_df['emotes'] = e #✅
wl_results_df['nonstandard_spelling'] = sc #✅
wl_results_df['quoted_words'] = quotes # ✅
wl_results_df['type_token_ratio'] = tt #✅
wl_results_df['hapax_legomena'] = hlg #✅
wl_results_df['hapax_dislegomena'] = hdlg #✅
wl_results_df['syllable_freqs'] = sfr #✅
wl_results_df['readability_flesch_kincaid'] = fk #✅
return wl_results_df
#return get_wl(data)#get_cl(data) , get_wl(data)
#%%
# Debugging
# test_df = train.iloc[:50, :]
# test = get_wl(test_df, analyze=False)
# %%
#%%
# Extract sentence-level features (syntactic)
def get_sl(data, text_col = 'text_clean',cv=None , train=False, analyze=False):
# load spacy model.
print('Loading spacy model')
nlp = spacy.load('en_core_web_sm')
nlp.enable_pipe("senter") #TODO Added senter to get_sl while passing on docs for speed.
# For POS tags, you could map a pos tag sequence/vector to the tweet.
    # Initialize CountVectorizer for pos ngrams. Store pos tags in separate column and transform with sklearn-pandas per column instead.
if train == True:
cv = CountVectorizer(analyzer='word', ngram_range=(1,3))
else:
cv = cv
    # Retokenize the text
docs = data[text_col].swifter.apply(lambda x: nlp(x))
#toks = docs.apply(lambda x: [t.text for t in x]) # not used.
#return pd.DataFrame({'docs' : docs.map(len) , 'toks': toks.map(len)})
# Frequencies
# 1.1 frequencies of stop words (i.e. function words)
sts = docs.apply(lambda x: len([t.text for t in x if t.is_stop == True]) / len(x)) # normalized by all tokens (including numbers and punct.)
# 1.2 frequencies of punctuation
pnct = docs.apply(lambda x: len([t.text for t in x if t.is_punct == True]) / len(x))
# 1.3 Frequencies of roots (normalized by total number of words in tweet).
rts = docs.apply(lambda x: len([(t, t.dep_) for t in [t for t in x if t.is_space == False] if t.dep_ == 'ROOT']) / len(x)) # This still includes number-like tokens, punctuation and mentions, since these are relevant in the dependency trees. Normalization could account for whitespaces, but doesn't have to.
# 3. POS frequencies.
# Extract pos tags:count (use Counter)
pos = docs.apply(lambda x: [t.pos_ for t in x if t.text.isalnum() == True])
pos_freq = docs.apply(lambda x: {p:c/len([t for t in x if t.text.isalnum() == True]) for p, c in Counter([t.pos_ for t in x if t.text.isalnum() == True ]).items()}) # normalized by alphanumeric tokens (since punctuation frequencies are captured separately).
#pos_freq = [{k:v} for k, v in pfreq.items()]
#return pd.DataFrame({'text' : data[text_col] , 'tokens' : toks, 'pos' : pos})
# 4. POS ngrams (n=uni-bi-tri) - TODO move to ngrams
# join pos tags into strings for CountVectorizer -> return as special case. Do a type check in the lookup or vectorize function that just passes the matrix on. OR pass on POS strings to vectorize in the vectorize function?
#print('fit/transforming posgrams')
pgrams = pos.str.join(' ').to_list()
if train == True:
pgram_matrix = cv.fit_transform(pgrams)
#return cv, pgram_matrix
else:
pgram_matrix = cv.transform(pgrams)
# Sketch of countvectorizing pos ngrams.
    #cv.fit_transform(test.str.join(sep=' ').to_list()) # This works. consider how to get pos ngrams and still make them interpretable in the corpora - e.g. most frequent triplets? Does that even really tell you anything? You could Counter or use a pandas method to get most frequent combination?
    # {k:v for k, v in Counter(cv.get_feature_names()).items()}
    # Note Counter has counter.most_common(n)
    # Could use nltk.util.ngrams(sequence, n) as suggested here https://stackoverflow.com/questions/11763613/python-list-of-ngrams-with-frequencies
    # See the sketch cell after this function (below the debugging cell) for one way to pull the most frequent POS n-grams out of the fitted CountVectorizer.
# 6. Sentiment?
# sentis = docs.apply(lambda x: sum([t.sentiment for t in x])) # doesn't work. needs training?
#return pd.DataFrame({'n_sents_spacy' : n_sents, 'n_sents_tstat' : n_sents_tstat})
if analyze == True:
# Store results.
stop_freq = listify(sts, feature_name='stopword_frequency')
punct_freq = listify(pnct, feature_name='punctuation_freq')
root_freq = listify(rts, feature_name='root_frequencies')
syn_results = {'stopword_freq': stop_freq,
'syn_punc_freq' : punct_freq,
'root_freq': root_freq,
'pos_freq' : list(pos_freq),
'pos_ngrams' : pgram_matrix}
return cv, syn_results
else:
syn_results_df = pd.DataFrame()
syn_results_df['stopword_freq'] = sts
syn_results_df['syn_punc_freq'] = pnct
syn_results_df['root_freq'] = rts
#syn_results_df['pos_freq'] = list(pos_freq)
#syn_results_df['pos_ngrams'] = pgram_matrix
return docs, cv, pgram_matrix, syn_results_df
# To call on test data, remember to call it on the cv returning after calling it on the training data - call it 'train_cv' in model.py
#%%
# Debugging
# test_df = train.iloc[:50,:]
# test = get_sl(test_df, train=True, analyze=True)
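#%%
# Sketch (not part of the original pipeline): inspect the most frequent POS n-grams
# captured by the CountVectorizer fitted in get_sl above. `sent_cv` and `pgram_matrix`
# stand for the vectorizer and matrix returned with train=True; the helper name is
# illustrative only.
def top_pos_ngrams(sent_cv, pgram_matrix, n=10):
    import numpy as np
    # column-wise sum of the sparse document-term matrix = total count per POS n-gram
    counts = np.asarray(pgram_matrix.sum(axis=0)).ravel()
    names = sent_cv.get_feature_names()
    order = counts.argsort()[::-1][:n]
    return [(names[i], int(counts[i])) for i in order]
# Example (commented, mirroring the debugging cells above):
# sent_cv, syn_results = get_sl(train.iloc[:50, :], train=True, analyze=True)
# top_pos_ngrams(sent_cv, syn_results['pos_ngrams'])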
#%% ANCHOR testing get_syn
# extract_feats(test_df, analyze=True, train=True)
# NOTE when extracting in model.py, call twice instead of once.
#train.columns.get_loc('text_clean')
# test_df = train.iloc[:50, :] # versus list version: train_text[:20]
# test = get_syn(test_df)
# # val_test = get_lexical(train_text[:5])
#%%
#%%
# Extract document-level features (structural)
def get_dl(data, text_col='text_clean', analyze=True, docs=[]):
# 1. Number of sentences
if len(docs) <= 0:
print('Configuring spacy model for document level')
nlp = spacy.load('en_core_web_sm', disable=['lemmatizer', 'parser','tagger','ner'])
nlp.enable_pipe('senter') # this is the main diff between wl, sl and dl.
docs = data[text_col].swifter.apply(lambda x: nlp(x))
ns = docs.apply(lambda x: len([s for s in x.sents])) #en_web_sm is not as accurate as blank or textstat.
# ns = data[text_col].apply(
# lambda x: textstat.sentence_count(x))
# 2. Number of user mentions - absolute counts.
ms = data[text_col].str.count('@user', flags=re.I|re.M)
# Could be expanded to include hashtags and urls in the future here.
if analyze == True:
n_sents = listify(ns, feature_name = 'number_of_sentences')
ments = listify(ms, feature_name = 'number_of_mentions')
struc_results = {'n_sents': n_sents, 'n_mentions': ments} # before skiping listify.
#struc_results = {'n_sents' : ns, 'n_mentions' : ms}
return struc_results
else:
struc_results_df = pd.DataFrame()
struc_results_df['n_sents'] = ns #✅
struc_results_df['n_mentions'] = ms #✅
return struc_results_df
#%%
# Testing get_struc.
#test = get_dl(test_df, analyze=False)
#%%
# ANCHOR function to lookup and get specific [{features: x.x}] from extraction funct.
def feature_lookup(f_param_dict, extracted_features):
    # Return the extracted feature lists whose names appear in the parameter dict.
    selected_features = dict()
    for feat_names in f_param_dict.values():
        for fn in feat_names:
            if fn in extracted_features:
                selected_features[fn] = extracted_features[fn]
    return selected_features
# also look into dpath, dict-toolbox2
#%%
# Test feature_lookup
# t = {'some_feature_cat1': ['feature_name1', 'feature_name2']}
# feature_lookup(t)
#%%
def conc_features(matrixList):
# Concatenate feature vectors
# pass a list or dict of matrices and do list/dict comprehension/unpacking?
#combined_features = hstack([feature_vector1, feature_vector2], 'csr')
combined_features = hstack(matrixList, 'csr')
return combined_features
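#%%
# Minimal, self-contained sketch (illustration only) of what conc_features does:
# horizontally stack sparse feature matrices that share the same number of rows.
from scipy.sparse import csr_matrix
_a = csr_matrix([[1, 0], [0, 2]])    # e.g. a POS-ngram count matrix
_b = csr_matrix([[3], [4]])          # e.g. dict-vectorized engineered features
_ab = conc_features([_a, _b])        # CSR matrix of shape (2, 3)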
#%%
def d_vectorize(selected_feats, train=False, dv=None):
# Old approach: Vectorize all generated lists of dicts (stored in a dict or list?).
# if train == True:
# dv = DictVectorizer()
# #X = d.fit_transform(dictList)
# # Either store as list.
# dvList = []
# matList = []
# # Or in single dict
# #matDict = dict() using dv as a key just overwrites the value since they are all identical. Nesting the dict just complicates things even more...
# if train == True:
# # Iterate through feature lists of dictionaries (lexical, syntactic, structural)
# for feature_name, feat_list in selected_feats.items():
# #print(feature_name, feat_list)
# #return
# if feature_name == 'pos_ngrams': # Check for pos_ngrams (already vectorized)
# matList.append(feat_list) # if pos_ngrams feat matrix, just append it.
# #matDict[dv] = feat_list
# continue
# if train == True:
# feat_matrix = dv.fit_transform(feat_list)
# # NOTE storing each vectorizer
# dvList.append(dv)
# matList.append(feat_matrix)
# # This is the test case
# # The test case. transforming test data to fitted individual dvs.
# if train == False: #iterate through each dv and all the feature lists.
# feat_lists = []
# # this has to only fit once per feature dv-featurelist pair.
# for feature_name, feat_list in selected_feats.items():
# if feature_name == 'pos_ngrams':
# matList.append(feat_list)
# continue
# feat_lists.append(feat_list)
# #return(feat_lists)
# for dv, featList in list(zip(dvs, feat_lists)): # enable this to loop through both dvs and features.
# #print(dv, featList)
# feat_matrix = dv.transform(featList) # this needs to be passed its corresponding dv. if you store in zip/list, it should have the same, fixed order. but how to iterate?
# matList.append(feat_matrix)
# #matDict[dv] = feat_matrix
# # Is LIWC a separate case? Should be the same as engineered features.
# #return matDict#dv, matList #matDict.values() should be list of matrices equal to number of features. To be concatenated.
# return dvList, matList
# New approach - using dfs with selected features.
# 1. Get list of dicts, row-wise from selected features DF.
feats = selected_feats.to_dict('records')
if train == True:
dv = DictVectorizer()
feats_vecs = dv.fit_transform(feats)
return dv , feats_vecs
else:
feats_vecs = dv.transform(feats)
return dv, feats_vecs
#%%
####
# test_df = train.iloc[:50,:]
# sent_cv_train, extracted_train = extract_feats(test_df, text_col='text_clean', analyze=False, train=True, feature_pms=feature_params)
# sent_cv_test, extracted_test = extract_feats(val.iloc[:50,:], text_col='text_clean', analyze=False, train=False, cv=sent_cv_train, feature_pms=feature_params)
# train_dv, train_vecs = d_vectorize(train_selected_feats_df, train=True)
# test_dv, test_vecs = d_vectorize(test_selected_feats_df, train=False, dv=train_dv)
####
#test = d_vectorize(extracted_test, train=False, dvs=train_dvs)
# Then d_vectorize LIWC matches.
# Then concat all of the vectorized features.
# Then fit model!
#%%
def extract_feats(data, text_col='text_clean', feature_pms=dict(), analyze=False, cv=None, train=False):
# Data = dataframe - can be recast by child functions.
# See if resetting data index speeds up extraction.
data.reset_index(drop=True, inplace=True)
# lowercase all @USER mentions. An artifact from preprocessing.
data[text_col] = data[text_col].str.replace(
'@USER', '@user') # , inplace=True)
all_features_dict = dict()
all_features_df_list = []
selected_features = dict()
# 1. Call each of the extractor functions
# 1.3 Sentence-level # TODO moved up to pass docs to other extraction functs for speed.
print('Sentence level features')
if analyze == True:
docs = []
sent_cv, sent_lvl = get_sl(
data, text_col=text_col, cv=cv, analyze=analyze, train=train)
else:
docs, sent_cv, pgram_matrix, sent_lvl = get_sl(data, text_col=text_col, cv=cv, analyze=analyze, train=train)
# 1.1 Character-level (10 features)
print('Character level features')
char_lvl = get_cl(data, text_col=text_col, analyze=analyze)
# 1.2 Word-level
print('Word level features')
word_lvl = get_wl(data, text_col=text_col, analyze=analyze, docs=docs)
#sent_lvl = word_lvl.copy(deep=True)
#return sent_lvl
# if train == False:
# sent_cv, sent_lvl = get_sl(data, text_col=text_col, analyze=analyze)
# 1.4 Document-level
print('Document level features')
doc_lvl = get_dl(data, text_col=text_col, analyze=analyze, docs=docs)
#return doc_lvl
# Return all features if extracting for feature analysis. LIWC is analyzed separately.
if analyze == True:
# Store in dict
all_features_dict['character_level'] = char_lvl
all_features_dict['word_level'] = word_lvl
all_features_dict['sentence_level'] = sent_lvl # Maybe pop pgrams matrix into separate var/container?
all_features_dict['document_level'] = doc_lvl
return sent_cv, all_features_dict # pass sent_cv on to analyze_feats from here.
# Old approaches
# Option 1 - extracting flat list (of n instances) (of dicts with n features) to vectorize in one go.
# for feat_cat, feature_name in feature_pms['engineered'].items():
# if feat_cat in all_features.keys():
# selected_features[feat_cat] = all_features[feat_cat].values()
# return selected_features
# TODO how to make sure that all features align? Pandas? hstack before fitting?
# Option 2 - extract individual lists of [{'feature1' : feature_value}... {'feature2' : feature_value}] for each feauture?
# Iterate through features to pass on, given parameters in parameter dict.
# Get a flat list of all desired target features.
#target_feats = list(itertools.chain.from_iterable([fn for fn in feature_pms['engineered'].values()]))
# Lookup and retrieve each feature from all_features and store in selected_features
# Works, but return that awkward df with individual dicts.
# for feat_level, feat_name in all_features.items():# outer level {'feature_level': 'feature_name': [{'feature' : feature_val}]}
# for fn, fl in feat_name.items():
# if fn in target_feats:
# selected_features[fn] = fl
# Return selected features
# 2. return selectively for classification
if analyze == False:
# Get a flat list of all desired target features.
target_feats = list(itertools.chain.from_iterable([fn for fn in feature_pms['engineered'].values()]))
#return char_lvl, word_lvl, sent_lvl, doc_lvl
# Concatenate feature dfs for each level horizontally.
#all_feats_df = pd.concat([char_lvl, word_lvl, sent_lvl, doc_lvl], axis=1, join='inner') # works.
all_feats_df_list = [char_lvl, word_lvl, sent_lvl, doc_lvl]
# Mitigating duplicate indeces in dfs..
[df.reset_index(inplace=True, drop=True) for df in all_feats_df_list]
# 1.5 LIWC features
# parsed_liwc is called in the main namespace.
if feature_pms['liwc'] == True:
liwc_feats = pd.DataFrame.from_records(
liwc_match(parsed_liwc, data, extract=True))
#selected_features['liwc_counts'] = liwc_feats # store LIWC straight in selected_feats dict.
# index liwc_feats with data.index
liwc_feats.set_index(data.index, inplace=True)
all_feats_df_list.append(liwc_feats)
#return liwc_feats
#return sent_cv, all_features
# concat liwc features to df selected features.
# Concat all feature dfs.
#try:
all_feats_df = pd.concat(all_feats_df_list, axis=1, join='inner')
#print(all_feats_df)
#except:
# return all_feats_df_list# , [len(df) for df in all_feats_df_list]
# Select columns from all features df unless they are pos_ngrams. could add pos_freqs here.
# return all_feats_df 35+64=99 feats.
selected_feats_df = all_feats_df[[fn for fn in target_feats if fn != 'pos_ngrams']]
#return all_feats_df, target_feats
return sent_cv, pgram_matrix, selected_feats_df
#%% ANCHOR procedure for feature extraction.
# test_df = train.iloc[:50,:]
# #sent_cv, train_feats_df = extract_feats(test_df, feature_pms = feature_params, analyze=False, train=True)
# # Parse LIWC
# parsed_liwc = parse_liwc('../../../Data/LIWC2007dictionary poster.xls', text_col=text_col)
# # This is just a test of extraction with liwc.
# liwc_test = extract_feats(test_df, feature_pms = feature_params, analyze=False, train=True)
# # Dict_vectorize-fit_transform train.
# train_en_feat_vec = d_vectorize(train_selected_feats_df, train=True)
# # Combine feature matrices: # also use ngrams in model.py.
# train_feats_combined = conc_feat([train_pgram_matrix , train_en_feat_vec])
# # Extract test_feats
# sent_cv, test_pgram_matrix, test_selected_feats_df = extract_feats(val.iloc[:50,], feature_pms= feature_params, analyze=False, train=False, cv=sent_cv)
# # Dict_vectorize-transform test with train_dv.
# test_en_feat_vec = d_vectorize(test_selected_feats_df, train=False)
# -> concat pgram matrices and each selected feature df after dictvectorizing them.
####
#analysis = analyze_feats(train_feats_dict) # analysis case
#feats_for_vec = extract_feats(test_df, feature_pms=feature_params, analyze=False, train=True) # the train case
# test = extract_feats(test_df, analyze=True, cv=train_cv, train=False) # test case
#%%
# analyze features TODO move to data_exploration
def analyze_feats(featuresDict, resultpath='./exploring/feature_analysis/', cv=None):
# This function is called on the complete output of all the extract functions.
# Put all extracted features into a single dict. You then call vectorize and concat on that based on lookup either manual or via function.
# LIWC is handled separately..
# 0. Append all lists of dicts (dictLists) to one flat list.
featList = []
posfreqList = []
pgrams = None
# Smarter solution : calculate stats directly on dict values in lists of dicts.
statsDict = dict()
#Loop through top level featDict
for feat_level, feat_name in featuresDict.items():
#featList.append(pd.DataFrame(feat_name))
#print(feat_name.keys())
            #Second level - individual feature names : [{'feature' : int/float}].
for feat, feat_value in feat_name.items():
#print( feat, type(feat_value))
            # store pos features separately.
if feat == 'pos_freq':
posfreqList.append(pd.DataFrame(feat_value))
continue
if feat == 'pos_ngrams':
pgrams = feat_value
continue
featList.append(pd.DataFrame(feat_value))
# Concat lists of extracted feature dataframes.
    featDF = pd.concat(featList, axis=1)
"""
Copyright 2020 The Secure, Reliable, and Intelligent Systems Lab, ETH Zurich
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import numpy as np
import json
import os
import pandas as pd
import time
import torch
from toolz import interleave
import sys
sys.path.insert(0, 'models')
sys.path.insert(0, 'common')
import predictions as pr
import prediction_generator as prg
from iterated_prediction_loop import IteratedPredictionLoop
from model import GroundTruthModel
from model import ClimatologicalModel
from project_structure import *
import utils
class EvalLoop(IteratedPredictionLoop):
def __init__(self,args):
super(EvalLoop,self).__init__(args,eval_folder)
if(not isinstance(self.steps,list)):
self.steps = [self.steps]
self.nsteps = len(args.steps)
self.rps = args.rps
if self.rps:
self.n_bins = args.n_bins
print("RPS mode")
print("N bins",self.n_bins)
self.predictions, self.binary_predictions = \
pr.construct_predictions_list(args.steps,max(self.n_bins,1))
self.certify = args.certify
self.radius,self.cdf = pr.get_cdf_thresholds()
if self.certify:
print("Radius",self.radius)
print("Cdf",self.cdf)
def log_meta(self,time,folder):
# Log meta file with experiments parameters
stats = {"Time per run (seconds)":time,"N samples":self.samples}
with open(folder + "/meta.json", "w") as f:
f.write(json.dumps(stats,indent=4))
def get_config_folders(self,year_folder):
config_folders = self.list_dir(os.path.join(self.folder, year_folder))
        # Build one config entry per trained model type (mdn, lstm, tcn), keeping its run folders
config_list = []
for c in config_folders:
config,folder_full,run_folders,n_reps = self.load_config(year_folder,c)
if(config["model"]["type"]=="mdn"):
config_list.append({"n reps": n_reps,
"training folder": folder_full,
"output folder": "mdn_ours",
"run folders": run_folders,
"config": config})
elif(config["model"]["type"]=="lstm"):
config_list.append({"n reps":n_reps,
"training folder": folder_full,
"output folder": "lstm",
"run folders": run_folders,
"config": config})
elif config["model"]["type"]=="tcn":
config_list.append({"n reps": n_reps,
"training folder": folder_full,
"output folder": "tcn",
"run folders": run_folders,
"config": config})
else:
raise Exception("No such model")
# Iterate over config
return config_list
# Results has shape (repetition,nsteps)
def get_entries(self,results,config):
entries = np.zeros((self.nsteps,6))
if (config["model"]["type"] == "mdn"):
entries[:,0] = 1
else:
entries[:,0] = 0
entries[:,1] = self.steps
# Average/Std
metrics = np.array(results)
means = np.mean(metrics, axis=0)
stds = np.std(metrics, axis=0)
entries[:,2:4] = means
entries[:,4:6] = stds
return entries
def result_to_dataframe(self,result):
predictions_list = [ self.predictions["steps"],
self.predictions["names"],
self.predictions["prices"]]
predictions_list = zip(*predictions_list)
product = [(x[0],x[1],x[2],y) for x in predictions_list for y in self.samples]
indexes = pd.MultiIndex.from_tuples(product,names=["steps","names","prices","samples"])
columns = ["val"]+\
["bin "+str(i) for i in range(self.n_bins)]+\
["RPS"]
data = result.transpose(1,0,2).reshape(-1,result.shape[2])
dataframe_for_run = pd.DataFrame(index=indexes,
columns=columns,
data=data)
dataframe_for_run.sort_index(inplace=True)
return dataframe_for_run
def binary_results_to_dataframe(self,result):
if self.certify:
len_binary = 4+len(self.cdf)
else:
len_binary = 4
predictions_list = [self.binary_predictions["steps"],
self.binary_predictions["names"],
self.binary_predictions["prices"]]
predictions_list = zip(*predictions_list)
product = [(x[0], x[1], x[2], y) for x in predictions_list for y in self.samples]
        indexes = pd.MultiIndex.from_tuples(product, names=["steps", "names", "prices", "samples"])
#!/usr/bin/env python
# coding: utf-8
# In[2]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
pd.options.display.max_columns = None
# In[3]:
df = pd.read_csv('full_data.csv')
# In[4]:
df.head()
# Since I got the data from lolchess.gg, the dates are only available in the form "2 days ago", "1 week ago" and so on, so the date is only an approximation.
# In[5]:
df = df.loc[:, (df != 0).any(axis=0)]
# In[6]:
df.head()
# In[7]:
df.info()
# In[8]:
df.describe()
# In[9]:
date_count = pd.DataFrame(df['date'].value_counts()).sort_index(ascending = False).reset_index().rename(columns={'index': 'Date', 'date': 'Count'})
date_count
# I will concentrate the analysis on the data since last Thursday (2020-12-10)
# In[10]:
df = df[df['date'] >= '2020-12-10']
# In[11]:
tdf = df[df['Placement'] <= 8] #Look at the traits of all placements
tdf = tdf.reset_index(drop = True)
tdf.head()
# In[12]:
ptdf = tdf.iloc[:, 4:]
# In[13]:
# Originally only the two highest trait counts per row were kept (see the commented line below); now every active trait (count >= 1) is kept
#ptdf = ptdf[ptdf.apply(lambda row: row >= row.nlargest(2)[-1],axis=1)]
ptdf = ptdf[ptdf.apply(lambda row: row >= 1,axis=1)]
ptdf.head()
# In[14]:
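# notna().dot(columns + ' + ') concatenates, row by row, the names of every non-null
# (i.e. active) trait column into a single ' + '-separated string; rstrip removes the
# trailing separator.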
combinations = ptdf.notna().dot(ptdf.columns+' + ').str.rstrip(' +')
# In[15]:
nr_combs = []
for count, i in enumerate(ptdf.values):
clear_list = []
current_traits = combinations.iloc[count].split(' + ')
count2 = 0
for j in i:
#print(j)
if j > 0:
clear_list.append(current_traits[count2] + ' (' + str(int(j)) + ')')
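            # sort the "Trait (count)" strings by their count (the digit just before
            # the closing parenthesis), highest count first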
clear_list.sort(key = lambda x: int(x.split('(')[1][-2]), reverse = True)
count2 += 1
nr_combs.append(' + '.join(clear_list))
# In[16]:
pd.DataFrame(nr_combs)
# In[17]:
df_test1 = pd.concat([tdf.iloc[:, :4], pd.DataFrame(nr_combs)], axis=1).rename(columns = {0: 'comb'})
df_test1.head()
# In[18]:
pd.set_option('display.max_rows', None)
# pylint: disable=E1101,E1103,W0232
from datetime import datetime, timedelta
from pandas.compat import range, lrange, lzip, u, zip
import operator
import re
import nose
import warnings
import os
import numpy as np
from numpy.testing import assert_array_equal
from pandas import period_range, date_range
from pandas.core.index import (Index, Float64Index, Int64Index, MultiIndex,
InvalidIndexError, NumericIndex)
from pandas.tseries.index import DatetimeIndex
from pandas.tseries.tdi import TimedeltaIndex
from pandas.tseries.period import PeriodIndex
from pandas.core.series import Series
from pandas.util.testing import (assert_almost_equal, assertRaisesRegexp,
assert_copy)
from pandas import compat
from pandas.compat import long
import pandas.util.testing as tm
import pandas.core.config as cf
from pandas.tseries.index import _to_m8
import pandas.tseries.offsets as offsets
import pandas as pd
from pandas.lib import Timestamp
class Base(object):
""" base class for index sub-class tests """
_holder = None
_compat_props = ['shape', 'ndim', 'size', 'itemsize', 'nbytes']
def verify_pickle(self,index):
unpickled = self.round_trip_pickle(index)
self.assertTrue(index.equals(unpickled))
def test_pickle_compat_construction(self):
# this is testing for pickle compat
if self._holder is None:
return
# need an object to create with
self.assertRaises(TypeError, self._holder)
def test_numeric_compat(self):
idx = self.create_index()
tm.assertRaisesRegexp(TypeError,
"cannot perform __mul__",
lambda : idx * 1)
tm.assertRaisesRegexp(TypeError,
"cannot perform __mul__",
lambda : 1 * idx)
div_err = "cannot perform __truediv__" if compat.PY3 else "cannot perform __div__"
tm.assertRaisesRegexp(TypeError,
div_err,
lambda : idx / 1)
tm.assertRaisesRegexp(TypeError,
div_err,
lambda : 1 / idx)
tm.assertRaisesRegexp(TypeError,
"cannot perform __floordiv__",
lambda : idx // 1)
tm.assertRaisesRegexp(TypeError,
"cannot perform __floordiv__",
lambda : 1 // idx)
def test_boolean_context_compat(self):
# boolean context compat
idx = self.create_index()
def f():
if idx:
pass
tm.assertRaisesRegexp(ValueError,'The truth value of a',f)
def test_ndarray_compat_properties(self):
idx = self.create_index()
self.assertTrue(idx.T.equals(idx))
self.assertTrue(idx.transpose().equals(idx))
values = idx.values
for prop in self._compat_props:
self.assertEqual(getattr(idx, prop), getattr(values, prop))
# test for validity
idx.nbytes
idx.values.nbytes
class TestIndex(Base, tm.TestCase):
_holder = Index
_multiprocess_can_split_ = True
def setUp(self):
self.indices = dict(
unicodeIndex = tm.makeUnicodeIndex(100),
strIndex = tm.makeStringIndex(100),
dateIndex = tm.makeDateIndex(100),
intIndex = tm.makeIntIndex(100),
floatIndex = tm.makeFloatIndex(100),
boolIndex = Index([True,False]),
empty = Index([]),
tuples = MultiIndex.from_tuples(lzip(['foo', 'bar', 'baz'],
[1, 2, 3]))
)
for name, ind in self.indices.items():
setattr(self, name, ind)
def create_index(self):
return Index(list('abcde'))
def test_wrong_number_names(self):
def testit(ind):
ind.names = ["apple", "banana", "carrot"]
for ind in self.indices.values():
assertRaisesRegexp(ValueError, "^Length", testit, ind)
def test_set_name_methods(self):
new_name = "This is the new name for this index"
indices = (self.dateIndex, self.intIndex, self.unicodeIndex,
self.empty)
for ind in indices:
original_name = ind.name
new_ind = ind.set_names([new_name])
self.assertEqual(new_ind.name, new_name)
self.assertEqual(ind.name, original_name)
res = ind.rename(new_name, inplace=True)
# should return None
self.assertIsNone(res)
self.assertEqual(ind.name, new_name)
self.assertEqual(ind.names, [new_name])
#with assertRaisesRegexp(TypeError, "list-like"):
# # should still fail even if it would be the right length
# ind.set_names("a")
with assertRaisesRegexp(ValueError, "Level must be None"):
ind.set_names("a", level=0)
# rename in place just leaves tuples and other containers alone
name = ('A', 'B')
ind = self.intIndex
ind.rename(name, inplace=True)
self.assertEqual(ind.name, name)
self.assertEqual(ind.names, [name])
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.strIndex).__name__):
hash(self.strIndex)
def test_new_axis(self):
new_index = self.dateIndex[None, :]
self.assertEqual(new_index.ndim, 2)
tm.assert_isinstance(new_index, np.ndarray)
def test_copy_and_deepcopy(self):
from copy import copy, deepcopy
for func in (copy, deepcopy):
idx_copy = func(self.strIndex)
self.assertIsNot(idx_copy, self.strIndex)
self.assertTrue(idx_copy.equals(self.strIndex))
new_copy = self.strIndex.copy(deep=True, name="banana")
self.assertEqual(new_copy.name, "banana")
new_copy2 = self.intIndex.copy(dtype=int)
self.assertEqual(new_copy2.dtype.kind, 'i')
def test_duplicates(self):
idx = Index([0, 0, 0])
self.assertFalse(idx.is_unique)
def test_sort(self):
self.assertRaises(TypeError, self.strIndex.sort)
def test_mutability(self):
self.assertRaises(TypeError, self.strIndex.__setitem__, 0, 'foo')
def test_constructor(self):
# regular instance creation
tm.assert_contains_all(self.strIndex, self.strIndex)
tm.assert_contains_all(self.dateIndex, self.dateIndex)
# casting
arr = np.array(self.strIndex)
index = Index(arr)
tm.assert_contains_all(arr, index)
self.assert_numpy_array_equal(self.strIndex, index)
# copy
arr = np.array(self.strIndex)
index = Index(arr, copy=True, name='name')
tm.assert_isinstance(index, Index)
self.assertEqual(index.name, 'name')
assert_array_equal(arr, index)
arr[0] = "SOMEBIGLONGSTRING"
self.assertNotEqual(index[0], "SOMEBIGLONGSTRING")
# what to do here?
# arr = np.array(5.)
# self.assertRaises(Exception, arr.view, Index)
def test_constructor_corner(self):
# corner case
self.assertRaises(TypeError, Index, 0)
def test_constructor_from_series(self):
expected = DatetimeIndex([Timestamp('20110101'),Timestamp('20120101'),Timestamp('20130101')])
s = Series([Timestamp('20110101'),Timestamp('20120101'),Timestamp('20130101')])
result = Index(s)
self.assertTrue(result.equals(expected))
result = DatetimeIndex(s)
self.assertTrue(result.equals(expected))
# GH 6273
# create from a series, passing a freq
s = Series(pd.to_datetime(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']))
result = DatetimeIndex(s, freq='MS')
expected = DatetimeIndex(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990'],freq='MS')
self.assertTrue(result.equals(expected))
df = pd.DataFrame(np.random.rand(5,3))
df['date'] = ['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']
result = DatetimeIndex(df['date'], freq='MS')
# GH 6274
# infer freq of same
result = pd.infer_freq(df['date'])
self.assertEqual(result,'MS')
def test_constructor_ndarray_like(self):
# GH 5460#issuecomment-44474502
# it should be possible to convert any object that satisfies the numpy
# ndarray interface directly into an Index
class ArrayLike(object):
def __init__(self, array):
self.array = array
def __array__(self, dtype=None):
return self.array
for array in [np.arange(5),
np.array(['a', 'b', 'c']),
date_range('2000-01-01', periods=3).values]:
expected = pd.Index(array)
result = pd.Index(ArrayLike(array))
self.assertTrue(result.equals(expected))
def test_index_ctor_infer_periodindex(self):
xp = period_range('2012-1-1', freq='M', periods=3)
rs = Index(xp)
assert_array_equal(rs, xp)
tm.assert_isinstance(rs, PeriodIndex)
def test_constructor_simple_new(self):
idx = Index([1, 2, 3, 4, 5], name='int')
result = idx._simple_new(idx, 'int')
self.assertTrue(result.equals(idx))
idx = Index([1.1, np.nan, 2.2, 3.0], name='float')
result = idx._simple_new(idx, 'float')
self.assertTrue(result.equals(idx))
idx = Index(['A', 'B', 'C', np.nan], name='obj')
result = idx._simple_new(idx, 'obj')
self.assertTrue(result.equals(idx))
def test_copy(self):
        i = Index([], name='Foo')
from datetime import datetime, timedelta
import inspect
import numpy as np
import pytest
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_interval_dtype,
is_object_dtype,
)
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
IntervalIndex,
MultiIndex,
RangeIndex,
Series,
Timestamp,
cut,
date_range,
to_datetime,
)
import pandas.util.testing as tm
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
class TestDataFrameAlterAxes:
def test_set_index_directly(self, float_string_frame):
df = float_string_frame
idx = Index(np.arange(len(df))[::-1])
df.index = idx
tm.assert_index_equal(df.index, idx)
with pytest.raises(ValueError, match="Length mismatch"):
df.index = idx[::2]
def test_set_index(self, float_string_frame):
df = float_string_frame
idx = Index(np.arange(len(df))[::-1])
df = df.set_index(idx)
tm.assert_index_equal(df.index, idx)
with pytest.raises(ValueError, match="Length mismatch"):
df.set_index(idx[::2])
def test_set_index_cast(self):
# issue casting an index then set_index
df = DataFrame(
{"A": [1.1, 2.2, 3.3], "B": [5.0, 6.1, 7.2]}, index=[2010, 2011, 2012]
)
df2 = df.set_index(df.index.astype(np.int32))
tm.assert_frame_equal(df, df2)
# A has duplicate values, C does not
@pytest.mark.parametrize("keys", ["A", "C", ["A", "B"], ("tuple", "as", "label")])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_drop_inplace(self, frame_of_index_cols, drop, inplace, keys):
df = frame_of_index_cols
if isinstance(keys, list):
idx = MultiIndex.from_arrays([df[x] for x in keys], names=keys)
else:
idx = Index(df[keys], name=keys)
expected = df.drop(keys, axis=1) if drop else df
expected.index = idx
if inplace:
result = df.copy()
result.set_index(keys, drop=drop, inplace=True)
else:
result = df.set_index(keys, drop=drop)
tm.assert_frame_equal(result, expected)
# A has duplicate values, C does not
@pytest.mark.parametrize("keys", ["A", "C", ["A", "B"], ("tuple", "as", "label")])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_append(self, frame_of_index_cols, drop, keys):
df = frame_of_index_cols
keys = keys if isinstance(keys, list) else [keys]
idx = MultiIndex.from_arrays(
[df.index] + [df[x] for x in keys], names=[None] + keys
)
expected = df.drop(keys, axis=1) if drop else df.copy()
expected.index = idx
result = df.set_index(keys, drop=drop, append=True)
tm.assert_frame_equal(result, expected)
# A has duplicate values, C does not
@pytest.mark.parametrize("keys", ["A", "C", ["A", "B"], ("tuple", "as", "label")])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_append_to_multiindex(self, frame_of_index_cols, drop, keys):
# append to existing multiindex
df = frame_of_index_cols.set_index(["D"], drop=drop, append=True)
keys = keys if isinstance(keys, list) else [keys]
expected = frame_of_index_cols.set_index(["D"] + keys, drop=drop, append=True)
result = df.set_index(keys, drop=drop, append=True)
tm.assert_frame_equal(result, expected)
def test_set_index_after_mutation(self):
# GH1590
df = DataFrame({"val": [0, 1, 2], "key": ["<KEY>"]})
expected = DataFrame({"val": [1, 2]}, Index(["b", "c"], name="key"))
df2 = df.loc[df.index.map(lambda indx: indx >= 1)]
result = df2.set_index("key")
tm.assert_frame_equal(result, expected)
# MultiIndex constructor does not work directly on Series -> lambda
# Add list-of-list constructor because list is ambiguous -> lambda
# also test index name if append=True (name is duplicate here for B)
@pytest.mark.parametrize(
"box",
[
Series,
Index,
np.array,
list,
lambda x: [list(x)],
lambda x: MultiIndex.from_arrays([x]),
],
)
@pytest.mark.parametrize(
"append, index_name", [(True, None), (True, "B"), (True, "test"), (False, None)]
)
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_pass_single_array(
self, frame_of_index_cols, drop, append, index_name, box
):
df = frame_of_index_cols
df.index.name = index_name
key = box(df["B"])
if box == list:
# list of strings gets interpreted as list of keys
msg = "['one', 'two', 'three', 'one', 'two']"
with pytest.raises(KeyError, match=msg):
df.set_index(key, drop=drop, append=append)
else:
# np.array/list-of-list "forget" the name of B
name_mi = getattr(key, "names", None)
name = [getattr(key, "name", None)] if name_mi is None else name_mi
result = df.set_index(key, drop=drop, append=append)
# only valid column keys are dropped
# since B is always passed as array above, nothing is dropped
expected = df.set_index(["B"], drop=False, append=append)
expected.index.names = [index_name] + name if append else name
tm.assert_frame_equal(result, expected)
# MultiIndex constructor does not work directly on Series -> lambda
# also test index name if append=True (name is duplicate here for A & B)
@pytest.mark.parametrize(
"box", [Series, Index, np.array, list, lambda x: MultiIndex.from_arrays([x])]
)
@pytest.mark.parametrize(
"append, index_name",
[(True, None), (True, "A"), (True, "B"), (True, "test"), (False, None)],
)
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_pass_arrays(
self, frame_of_index_cols, drop, append, index_name, box
):
df = frame_of_index_cols
df.index.name = index_name
keys = ["A", box(df["B"])]
# np.array/list "forget" the name of B
names = ["A", None if box in [np.array, list, tuple, iter] else "B"]
result = df.set_index(keys, drop=drop, append=append)
# only valid column keys are dropped
# since B is always passed as array above, only A is dropped, if at all
expected = df.set_index(["A", "B"], drop=False, append=append)
expected = expected.drop("A", axis=1) if drop else expected
expected.index.names = [index_name] + names if append else names
tm.assert_frame_equal(result, expected)
# MultiIndex constructor does not work directly on Series -> lambda
# We also emulate a "constructor" for the label -> lambda
# also test index name if append=True (name is duplicate here for A)
@pytest.mark.parametrize(
"box2",
[
Series,
Index,
np.array,
list,
iter,
lambda x: MultiIndex.from_arrays([x]),
lambda x: x.name,
],
)
@pytest.mark.parametrize(
"box1",
[
Series,
Index,
np.array,
list,
iter,
lambda x: MultiIndex.from_arrays([x]),
lambda x: x.name,
],
)
@pytest.mark.parametrize(
"append, index_name", [(True, None), (True, "A"), (True, "test"), (False, None)]
)
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_pass_arrays_duplicate(
self, frame_of_index_cols, drop, append, index_name, box1, box2
):
df = frame_of_index_cols
df.index.name = index_name
keys = [box1(df["A"]), box2(df["A"])]
result = df.set_index(keys, drop=drop, append=append)
# if either box is iter, it has been consumed; re-read
keys = [box1(df["A"]), box2(df["A"])]
# need to adapt first drop for case that both keys are 'A' --
# cannot drop the same column twice;
# use "is" because == would give ambiguous Boolean error for containers
first_drop = (
False if (keys[0] is "A" and keys[1] is "A") else drop # noqa: F632
)
# to test against already-tested behaviour, we add sequentially,
# hence second append always True; must wrap keys in list, otherwise
# box = list would be interpreted as keys
expected = df.set_index([keys[0]], drop=first_drop, append=append)
expected = expected.set_index([keys[1]], drop=drop, append=True)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_pass_multiindex(self, frame_of_index_cols, drop, append):
df = frame_of_index_cols
keys = MultiIndex.from_arrays([df["A"], df["B"]], names=["A", "B"])
result = df.set_index(keys, drop=drop, append=append)
# setting with a MultiIndex will never drop columns
expected = df.set_index(["A", "B"], drop=False, append=append)
tm.assert_frame_equal(result, expected)
def test_set_index_verify_integrity(self, frame_of_index_cols):
df = frame_of_index_cols
with pytest.raises(ValueError, match="Index has duplicate keys"):
df.set_index("A", verify_integrity=True)
# with MultiIndex
with pytest.raises(ValueError, match="Index has duplicate keys"):
df.set_index([df["A"], df["A"]], verify_integrity=True)
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_raise_keys(self, frame_of_index_cols, drop, append):
df = frame_of_index_cols
with pytest.raises(KeyError, match="['foo', 'bar', 'baz']"):
# column names are A-E, as well as one tuple
df.set_index(["foo", "bar", "baz"], drop=drop, append=append)
# non-existent key in list with arrays
with pytest.raises(KeyError, match="X"):
df.set_index([df["A"], df["B"], "X"], drop=drop, append=append)
msg = "[('foo', 'foo', 'foo', 'bar', 'bar')]"
# tuples always raise KeyError
with pytest.raises(KeyError, match=msg):
df.set_index(tuple(df["A"]), drop=drop, append=append)
# also within a list
with pytest.raises(KeyError, match=msg):
df.set_index(["A", df["A"], tuple(df["A"])], drop=drop, append=append)
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("box", [set], ids=["set"])
def test_set_index_raise_on_type(self, frame_of_index_cols, box, drop, append):
df = frame_of_index_cols
msg = 'The parameter "keys" may be a column key, .*'
# forbidden type, e.g. set
with pytest.raises(TypeError, match=msg):
df.set_index(box(df["A"]), drop=drop, append=append)
# forbidden type in list, e.g. set
with pytest.raises(TypeError, match=msg):
df.set_index(["A", df["A"], box(df["A"])], drop=drop, append=append)
# MultiIndex constructor does not work directly on Series -> lambda
@pytest.mark.parametrize(
"box",
[Series, Index, np.array, iter, lambda x: MultiIndex.from_arrays([x])],
ids=["Series", "Index", "np.array", "iter", "MultiIndex"],
)
@pytest.mark.parametrize("length", [4, 6], ids=["too_short", "too_long"])
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_raise_on_len(
self, frame_of_index_cols, box, length, drop, append
):
# GH 24984
df = frame_of_index_cols # has length 5
values = np.random.randint(0, 10, (length,))
msg = "Length mismatch: Expected 5 rows, received array of length.*"
# wrong length directly
with pytest.raises(ValueError, match=msg):
df.set_index(box(values), drop=drop, append=append)
# wrong length in list
with pytest.raises(ValueError, match=msg):
df.set_index(["A", df.A, box(values)], drop=drop, append=append)
def test_set_index_custom_label_type(self):
# GH 24969
class Thing:
def __init__(self, name, color):
self.name = name
self.color = color
def __str__(self):
return "<Thing {self.name!r}>".format(self=self)
# necessary for pretty KeyError
__repr__ = __str__
thing1 = Thing("One", "red")
thing2 = Thing("Two", "blue")
df = DataFrame({thing1: [0, 1], thing2: [2, 3]})
expected = DataFrame({thing1: [0, 1]}, index=Index([2, 3], name=thing2))
# use custom label directly
result = df.set_index(thing2)
tm.assert_frame_equal(result, expected)
# custom label wrapped in list
result = df.set_index([thing2])
tm.assert_frame_equal(result, expected)
# missing key
thing3 = Thing("Three", "pink")
msg = "<Thing 'Three'>"
with pytest.raises(KeyError, match=msg):
# missing label directly
df.set_index(thing3)
with pytest.raises(KeyError, match=msg):
# missing label in list
df.set_index([thing3])
def test_set_index_custom_label_hashable_iterable(self):
# GH 24969
# actual example discussed in GH 24984 was e.g. for shapely.geometry
# objects (e.g. a collection of Points) that can be both hashable and
# iterable; using frozenset as a stand-in for testing here
class Thing(frozenset):
# need to stabilize repr for KeyError (due to random order in sets)
def __repr__(self):
tmp = sorted(list(self))
# double curly brace prints one brace in format string
return "frozenset({{{}}})".format(", ".join(map(repr, tmp)))
thing1 = Thing(["One", "red"])
thing2 = Thing(["Two", "blue"])
df = DataFrame({thing1: [0, 1], thing2: [2, 3]})
expected = DataFrame({thing1: [0, 1]}, index=Index([2, 3], name=thing2))
# use custom label directly
result = df.set_index(thing2)
tm.assert_frame_equal(result, expected)
# custom label wrapped in list
result = df.set_index([thing2])
tm.assert_frame_equal(result, expected)
# missing key
thing3 = Thing(["Three", "pink"])
msg = r"frozenset\(\{'Three', 'pink'\}\)"
with pytest.raises(KeyError, match=msg):
# missing label directly
df.set_index(thing3)
with pytest.raises(KeyError, match=msg):
# missing label in list
df.set_index([thing3])
def test_set_index_custom_label_type_raises(self):
# GH 24969
# purposefully inherit from something unhashable
class Thing(set):
def __init__(self, name, color):
self.name = name
self.color = color
def __str__(self):
return "<Thing {self.name!r}>".format(self=self)
thing1 = Thing("One", "red")
thing2 = Thing("Two", "blue")
df = DataFrame([[0, 2], [1, 3]], columns=[thing1, thing2])
msg = 'The parameter "keys" may be a column key, .*'
with pytest.raises(TypeError, match=msg):
# use custom label directly
df.set_index(thing2)
with pytest.raises(TypeError, match=msg):
# custom label wrapped in list
df.set_index([thing2])
def test_construction_with_categorical_index(self):
ci = tm.makeCategoricalIndex(10)
ci.name = "B"
# with Categorical
df = DataFrame({"A": np.random.randn(10), "B": ci.values})
idf = df.set_index("B")
tm.assert_index_equal(idf.index, ci)
# from a CategoricalIndex
df = DataFrame({"A": np.random.randn(10), "B": ci})
idf = df.set_index("B")
tm.assert_index_equal(idf.index, ci)
# round-trip
idf = idf.reset_index().set_index("B")
tm.assert_index_equal(idf.index, ci)
def test_set_index_cast_datetimeindex(self):
df = DataFrame(
{
"A": [datetime(2000, 1, 1) + timedelta(i) for i in range(1000)],
"B": np.random.randn(1000),
}
)
idf = df.set_index("A")
assert isinstance(idf.index, DatetimeIndex)
def test_convert_dti_to_series(self):
# don't cast a DatetimeIndex WITH a tz, leave as object
# GH 6032
idx = DatetimeIndex(
to_datetime(["2013-1-1 13:00", "2013-1-2 14:00"]), name="B"
).tz_localize("US/Pacific")
df = DataFrame(np.random.randn(2, 1), columns=["A"])
expected = Series(
np.array(
[
Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"),
Timestamp("2013-01-02 14:00:00-0800", tz="US/Pacific"),
],
dtype="object",
),
name="B",
)
# convert index to series
result = Series(idx)
tm.assert_series_equal(result, expected)
# assign to frame
df["B"] = idx
result = df["B"]
tm.assert_series_equal(result, expected)
# convert to series while keeping the timezone
result = idx.to_series(keep_tz=True, index=[0, 1])
tm.assert_series_equal(result, expected)
# convert to utc
with tm.assert_produces_warning(FutureWarning):
df["B"] = idx.to_series(keep_tz=False, index=[0, 1])
result = df["B"]
comp = Series(DatetimeIndex(expected.values).tz_localize(None), name="B")
tm.assert_series_equal(result, comp)
with tm.assert_produces_warning(FutureWarning) as m:
result = idx.to_series(index=[0, 1])
tm.assert_series_equal(result, expected.dt.tz_convert(None))
msg = (
"The default of the 'keep_tz' keyword in "
"DatetimeIndex.to_series will change to True in a future "
"release."
)
assert msg in str(m[0].message)
with tm.assert_produces_warning(FutureWarning):
result = idx.to_series(keep_tz=False, index=[0, 1])
tm.assert_series_equal(result, expected.dt.tz_convert(None))
# list of datetimes with a tz
df["B"] = idx.to_pydatetime()
result = df["B"]
tm.assert_series_equal(result, expected)
# GH 6785
# set the index manually
import pytz
df = DataFrame([{"ts": datetime(2014, 4, 1, tzinfo=pytz.utc), "foo": 1}])
expected = df.set_index("ts")
df.index = df["ts"]
df.pop("ts")
tm.assert_frame_equal(df, expected)
def test_reset_index_tz(self, tz_aware_fixture):
# GH 3950
# reset_index with single level
tz = tz_aware_fixture
idx = date_range("1/1/2011", periods=5, freq="D", tz=tz, name="idx")
df = DataFrame({"a": range(5), "b": ["A", "B", "C", "D", "E"]}, index=idx)
expected = DataFrame(
{
"idx": [
datetime(2011, 1, 1),
datetime(2011, 1, 2),
datetime(2011, 1, 3),
datetime(2011, 1, 4),
datetime(2011, 1, 5),
],
"a": range(5),
"b": ["A", "B", "C", "D", "E"],
},
columns=["idx", "a", "b"],
)
expected["idx"] = expected["idx"].apply(lambda d: Timestamp(d, tz=tz))
tm.assert_frame_equal(df.reset_index(), expected)
def test_set_index_timezone(self):
# GH 12358
# tz-aware Series should retain the tz
idx = to_datetime(["2014-01-01 10:10:10"], utc=True).tz_convert("Europe/Rome")
df = DataFrame({"A": idx})
assert df.set_index(idx).index[0].hour == 11
assert DatetimeIndex(Series(df.A))[0].hour == 11
assert df.set_index(df.A).index[0].hour == 11
def test_set_index_dst(self):
di = date_range("2006-10-29 00:00:00", periods=3, freq="H", tz="US/Pacific")
df = DataFrame(data={"a": [0, 1, 2], "b": [3, 4, 5]}, index=di).reset_index()
# single level
res = df.set_index("index")
exp = DataFrame(
data={"a": [0, 1, 2], "b": [3, 4, 5]}, index=Index(di, name="index")
)
tm.assert_frame_equal(res, exp)
# GH 12920
res = df.set_index(["index", "a"])
exp_index = MultiIndex.from_arrays([di, [0, 1, 2]], names=["index", "a"])
exp = DataFrame({"b": [3, 4, 5]}, index=exp_index)
tm.assert_frame_equal(res, exp)
def test_reset_index_with_intervals(self):
idx = IntervalIndex.from_breaks(np.arange(11), name="x")
original = DataFrame({"x": idx, "y": np.arange(10)})[["x", "y"]]
result = original.set_index("x")
expected = DataFrame({"y": np.arange(10)}, index=idx)
tm.assert_frame_equal(result, expected)
result2 = result.reset_index()
        tm.assert_frame_equal(result2, original)
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 14 13:52:36 2020
@author: diego
"""
import os
import sqlite3
import numpy as np
import pandas as pd
import plots as _plots
import update_prices
import update_companies_info
pd.set_option("display.width", 400)
pd.set_option("display.max_columns", 10)
pd.options.mode.chained_assignment = None
update_prices.update_prices()
update_companies_info.update_db()
cwd = os.getcwd()
conn = sqlite3.connect(os.path.join(cwd, "data", "finance.db"))
cur = conn.cursor()
# %% Functions
class Ticker:
"""
Attributes and Methods to analyse stocks traded in B3 -BOLSA BRASIL BALCÃO
"""
def __init__(self, ticker, group="consolidated"):
"""
Creates a Ticker Class Object
Args:
ticker: string
string of the ticker
group: string
Financial statements group. Can be 'consolidated' or 'individual'
"""
self.ticker = ticker.upper()
df = pd.read_sql(
f"""SELECT cnpj, type, sector, subsector, segment, denom_comerc
FROM tickers
WHERE ticker = '{self.ticker}'""",
conn,
)
if len(df) == 0:
print('unknown ticker')
return
self.cnpj = df["cnpj"][0]
self.type = df["type"][0]
self.sector = df["sector"][0]
self.subsector = df["subsector"][0]
self.segment = df["segment"][0]
self.denom_comerc = df["denom_comerc"][0]
Ticker.set_group(self, group)
on_ticker = pd.read_sql(
f"SELECT ticker FROM tickers WHERE cnpj = '{self.cnpj}' AND type = 'ON'",
conn,
)
on_ticker = on_ticker[on_ticker["ticker"].str[-1] == "3"]
self.on_ticker = on_ticker.values[0][0]
try:
self.pn_ticker = pd.read_sql(
f"SELECT ticker FROM tickers WHERE cnpj = '{self.cnpj}' AND type = 'PN'",
conn,
).values[0][0]
        except IndexError:
            # the company has no listed PN ticker
            pass
def set_group(self, new_group):
"""
To change the financial statement group attribute of a object
Args:
new_group: string
can be 'consolidated' or 'individual'
"""
if new_group in ["individual", "consolidado", "consolidated"]:
if new_group == "individual":
self.grupo = "Individual"
else:
self.grupo = "Consolidado"
# Infer the frequency of the reports
dates = pd.read_sql(
f"""SELECT DISTINCT dt_fim_exerc as date
FROM dre
WHERE cnpj = '{self.cnpj}'
AND grupo_dfp = '{self.grupo}'
ORDER BY dt_fim_exerc""",
conn,
)
if len(dates) == 0:
self.grupo = "Individual"
print(
f"The group of {self.ticker} was automatically switched to individual due to the lack of consolidated statements."
)
dates = pd.read_sql(
f"""SELECT DISTINCT dt_fim_exerc as date
FROM dre
WHERE cnpj = '{self.cnpj}'
AND grupo_dfp = '{self.grupo}'
ORDER BY dt_fim_exerc""",
conn,
)
try:
freq = pd.infer_freq(dates["date"])
self.freq = freq[0]
except ValueError:
self.freq = "Q"
except TypeError:
dates["date"] = pd.to_datetime(dates["date"])
number_of_observations = len(dates)
period_of_time = (
dates.iloc[-1, 0] - dates.iloc[0, 0]
) / np.timedelta64(1, "Y")
if number_of_observations / period_of_time > 1:
self.freq = "Q"
else:
self.freq = "A"
if self.freq == "A":
print(
f"""
The {self.grupo} statements of {self.ticker} are only available on an annual basis.
Only YTD values will be available in the functions and many functions will not work.
Try setting the financial statements to individual:
Ticker.set_group(Ticker object, 'individual')
"""
)
else:
print("new_group needs to be 'consolidated' or 'individual'.")
def get_begin_period(self, function, start_period):
"""
Support method for other methods of the Class
"""
if start_period == "all":
begin_period = pd.to_datetime("1900-01-01")
return begin_period.date()
elif start_period not in ["all", "last"]:
try:
pd.to_datetime(start_period)
except:
print(
"start_period must be 'last', 'all', or date formated as 'YYYY-MM-DD'."
)
return
if start_period == "last":
if function in ["prices", "total_shares", "market_value"]:
last_date = pd.read_sql(
f"SELECT date FROM prices WHERE ticker = '{self.ticker}' ORDER BY date DESC LIMIT(1)",
conn,
)
else:
last_date = pd.read_sql(
f"SELECT dt_fim_exerc FROM dre WHERE cnpj = '{self.cnpj}' AND grupo_dfp = '{self.grupo}' ORDER BY dt_fim_exerc DESC LIMIT(1)",
conn,
)
begin_period = pd.to_datetime(last_date.values[0][0])
else:
begin_period = pd.to_datetime(start_period)
return begin_period.date()
def create_pivot_table(df):
"""
Support method for other methods of the Class
"""
##### Creates a pivot table and add % change columns #####
# create columns with % change of the values
# value_types: ytd, quarter_value, ttm_value
first_type = df.columns.get_loc('ds_conta') + 1
value_types = list(df.columns[first_type:])
new_columns = [i + " % change" for i in value_types]
df[new_columns] = df[value_types].div(
df.groupby("cd_conta")[value_types].shift(1))
        # the %change of YTD values is measured against the same period of the previous year,
        # so shift the comparison index forward by one year:
if 'ytd' in value_types:
shifted_values = df[['dt_fim_exerc', 'cd_conta', 'ytd']]
shifted_values = shifted_values.set_index(
[(pd.to_datetime(shifted_values['dt_fim_exerc']) + pd.DateOffset(years=1)), shifted_values['cd_conta']])
df = df.set_index([df['dt_fim_exerc'], df['cd_conta']])
df['ytd % change'] = df['ytd'] / shifted_values['ytd']
df[new_columns] = (df[new_columns] - 1) * 100
# reshape
df = df.pivot(
index=["cd_conta", "ds_conta"],
columns=["dt_fim_exerc"],
values=value_types + new_columns
)
# rename multiIndex column levels
df.columns = df.columns.rename("value", level=0)
df.columns = df.columns.rename("date", level=1)
# sort columns by date
df = df.sort_values([("date"), ("value")], axis=1, ascending=False)
        # Sometimes the description of the accounts has small differences for the
        # same account in different periods, such as punctuation. The purpose of the df_index
        # is to keep only one description for each account, avoiding duplicated rows.
df_index = df.reset_index().iloc[:, 0:2]
df_index.columns = df_index.columns.droplevel(1)
df_index = df_index.groupby("cd_conta").first()
# This groupby adds the duplicated rows
df = df.groupby(level=0, axis=0).sum()
# The next two lines add the account description to the dataframe multiIndex
df["ds_conta"] = df_index["ds_conta"]
df = df.set_index("ds_conta", append=True)
# Reorder the multiIndex column levels
df = df.reorder_levels(order=[1, 0], axis=1)
# Due to the command line 'df = df.sort_values([('dt_fim_exerc'), ('value')],
# axis=1, ascending=False)'
# the columns are ordered by date descending, and value descending. The pupose
# here is to set the order as: date descending and value ascending
df_columns = df.columns.to_native_types()
new_order = []
for i in range(1, len(df_columns), 2):
new_order.append(df_columns[i])
new_order.append(df_columns[i - 1])
new_order = pd.MultiIndex.from_tuples(
new_order, names=("date", "value"))
df = df[new_order]
return df
def income_statement(self, quarter=True, ytd=True, ttm=True, start_period="all"):
"""
Creates a dataframe with the income statement of the object.
Args:
quarter: boolean
includes or not quarter values
ytd: boolean
includes or not year to date values
ttm: boolean
includes or not trailing twelve months value
            start_period: string
                'all', 'last' or a start date formatted as 'YYYY-MM-DD'
Returns: pandas dataframe
"""
if self.freq == "A":
quarter = False
ttm = False
begin_period = Ticker.get_begin_period(
self, function="income_statement", start_period=start_period
)
begin_period = begin_period + pd.DateOffset(months=-12)
query = f"""SELECT dt_fim_exerc, fiscal_quarter, cd_conta, ds_conta, vl_conta AS ytd
FROM dre
WHERE cnpj = '{self.cnpj}'
AND grupo_dfp = '{self.grupo}'
AND dt_fim_exerc >= '{begin_period.date()}'
ORDER BY dt_fim_exerc"""
df = pd.read_sql(query, conn)
df["quarter_value"] = df[["cd_conta", "ytd"]
].groupby("cd_conta").diff()
df["quarter_value"][df["fiscal_quarter"] == 1] = df["ytd"][
df["fiscal_quarter"] == 1
]
if ttm == True:
df["ttm_value"] = (
df[["dt_fim_exerc", "cd_conta", "quarter_value"]]
.groupby("cd_conta")
.rolling(window=4, min_periods=4)
.sum()
.reset_index(0, drop=True)
)
        if not quarter:
            df = df.drop(["quarter_value"], axis=1)
        if not ytd:
            df = df.drop(["ytd"], axis=1)
df["dt_fim_exerc"] = pd.to_datetime(df["dt_fim_exerc"])
df = df[df["dt_fim_exerc"] >= begin_period + pd.DateOffset(months=12)]
df = df.drop(columns=["fiscal_quarter"])
df = Ticker.create_pivot_table(df)
return df
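    # Minimal usage sketch (illustrative; the constructor call, the ticker
    # symbol and the module-level `conn` database connection are assumptions,
    # not something this snippet sets up):
    #
    #     itub = Ticker("ITUB4")                     # hypothetical instance
    #     dre = itub.income_statement(ttm=True, start_period="2018-01-01")
    #     dre.head()                                 # (date, value) columns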
def balance_sheet(self, start_period="all", plot=False):
"""
Creates a dataframe with the balance sheet statement of the object.
Args:
            start_period: string
                first period to include; defaults to "all"
            plot: boolean
                whether to plot the balance sheet
Returns: pandas dataframe
"""
begin_period = Ticker.get_begin_period(
self, function="bp", start_period=start_period
)
query = f"""SELECT dt_fim_exerc, cd_conta, ds_conta, vl_conta
FROM bpa
WHERE cnpj = '{self.cnpj}'
AND grupo_dfp = '{self.grupo}'
AND dt_fim_exerc >= '{begin_period}'
UNION ALL
SELECT dt_fim_exerc, cd_conta, ds_conta, vl_conta
FROM bpp
WHERE cnpj = '{self.cnpj}'
AND grupo_dfp = '{self.grupo}'
AND dt_fim_exerc >= '{begin_period}'
ORDER BY dt_fim_exerc"""
df = pd.read_sql(query, conn, parse_dates=['dt_fim_exerc'])
df = Ticker.create_pivot_table(df)
if plot:
_plots.bs_plot(df, self.ticker, self.grupo)
return df
def cash_flow(self, quarter=True, ytd=True, ttm=True, start_period="all"):
"""
Creates a dataframe with the cash flow statement of the object.
Args:
            quarter: boolean
                whether to include quarterly values
            ytd: boolean
                whether to include year-to-date values
            ttm: boolean
                whether to include trailing-twelve-months values
            start_period: string
                first period to include; defaults to "all"
Returns: pandas dataframe
"""
if self.freq == "A":
quarter = False
ttm = False
begin_period = Ticker.get_begin_period(
self, function="dfc", start_period=start_period
)
begin_period = begin_period + pd.DateOffset(months=-12)
query = f"""SELECT dt_fim_exerc, fiscal_quarter, cd_conta, ds_conta, vl_conta AS ytd
FROM dfc
WHERE cnpj = '{self.cnpj}'
AND grupo_dfp = '{self.grupo}'
AND dt_fim_exerc >= '{begin_period.date()}'
ORDER BY dt_fim_exerc"""
df = pd.read_sql(query, conn)
df["quarter_value"] = df[["cd_conta", "ytd"]
].groupby("cd_conta").diff()
df["quarter_value"][df["fiscal_quarter"] == 1] = df["ytd"][
df["fiscal_quarter"] == 1
]
if ttm:
df["ttm_value"] = (
df[["dt_fim_exerc", "cd_conta", "quarter_value"]]
.groupby("cd_conta")
.rolling(window=4, min_periods=4)
.sum()
.reset_index(0, drop=True)
)
if not quarter:
df = df.drop(["quarter_value"], axis=1)
if not ytd:
df = df.drop(["ytd"], axis=1)
df["dt_fim_exerc"] = pd.to_datetime(df["dt_fim_exerc"])
df = df[df["dt_fim_exerc"] >= begin_period + pd.DateOffset(months=12)]
df = df.drop(columns=["fiscal_quarter"])
df = Ticker.create_pivot_table(df)
return df
def prices(self, start_period="all"):
"""
Support method for other methods of the Class
"""
begin_period = Ticker.get_begin_period(
self, function="prices", start_period=start_period
)
prices = pd.read_sql(
f"""SELECT date, preult AS price
FROM prices
WHERE ticker = '{self.ticker}' AND date >= '{begin_period}'
ORDER BY date""",
conn,
index_col="date", parse_dates=['date']
)
return prices
def total_shares(self, start_period="all"):
"""
Support method for other methods of the Class
"""
begin_period = Ticker.get_begin_period(
self, function="total_shares", start_period=start_period
)
query = f"""SELECT date, number_shares AS on_shares
FROM prices
WHERE ticker = '{self.on_ticker}' AND date >= '{begin_period}'
ORDER BY date"""
nshares_on = pd.read_sql(query, conn)
try:
query = f"""SELECT date, number_shares AS pn_shares
FROM prices
WHERE ticker = '{self.pn_ticker}' AND date >= '{begin_period}'
ORDER BY date"""
nshares_pn = pd.read_sql(query, conn)
shares = nshares_on.merge(nshares_pn, how="left")
shares["total_shares"] = shares["on_shares"] + \
shares["pn_shares"].fillna(0)
        except Exception:
            # No preferred (PN) ticker available; fall back to common shares only.
            shares = nshares_on.rename({"on_shares": "total_shares"}, axis=1)
shares.index = shares["date"]
shares.index = pd.to_datetime(shares.index)
return shares[["total_shares"]]
def net_income(self, quarter=True, ytd=True, ttm=True, start_period="all", plot=False):
"""
Creates a dataframe with the net income information of the object.
Args:
            quarter: boolean
                whether to include quarterly values
            ytd: boolean
                whether to include year-to-date values
            ttm: boolean
                whether to include trailing-twelve-months values
            start_period: string
                first period to include; defaults to "all"
            plot: boolean
                whether to plot the result
Returns: pandas dataframe
"""
if self.freq == "A":
quarter = False
ttm = False
begin_period = Ticker.get_begin_period(
self, function="net_income", start_period=start_period
)
begin_period = begin_period + pd.DateOffset(months=-12)
query = f"""SELECT dt_fim_exerc AS date, fiscal_quarter, ds_conta, vl_conta AS ytd_net_income
FROM dre
WHERE cnpj = '{self.cnpj}'
AND grupo_dfp = '{self.grupo}'
AND dt_fim_exerc >= '{begin_period.date()}'
AND (ds_conta = 'Resultado Líquido das Operações Continuadas' OR ds_conta = 'Lucro/Prejuízo do Período')
ORDER BY dt_fim_exerc"""
income_statement = pd.read_sql(
query, conn, index_col="date", parse_dates=['date'])
df = income_statement[
income_statement["ds_conta"]
== "Resultado Líquido das Operações Continuadas"
]
if len(df) == 0:
df = income_statement[
income_statement["ds_conta"] == "Lucro/Prejuízo do Período"
]
df = df.drop(["ds_conta"], axis=1)
df["quarter_net_income"] = df["ytd_net_income"] - \
df["ytd_net_income"].shift(1)
df["quarter_net_income"][df["fiscal_quarter"] == 1] = df["ytd_net_income"][
df["fiscal_quarter"] == 1
]
        if ttm:
df["ttm_net_income"] = (
df["quarter_net_income"].rolling(window=4, min_periods=4).sum()
)
        if not quarter:
            df = df.drop(["quarter_net_income"], axis=1)
        if not ytd:
            df = df.drop(["ytd_net_income"], axis=1)
df = df[df.index >= begin_period + | pd.DateOffset(months=12) | pandas.DateOffset |
# Testing array.blend
import utipy as ut
import numpy as np
import pandas as pd
def test_blend_list():
x1 = [1, 2, 3, 4, 5, 6]
x2 = [2, 3, 4, 5, 6, 7]
blended0 = ut.blend(x1, x2, amount=0)
blended1 = ut.blend(x1, x2, amount=1)
blended05 = ut.blend(x1, x2, amount=.5)
assert blended0 == x1
assert blended1 == x2
assert blended05 == [1.5, 2.5, 3.5, 4.5, 5.5, 6.5]
def test_blend_ndarray():
x1 = np.asarray([1, 2, 3, 4, 5, 6])
x2 = np.asarray([2, 3, 4, 5, 6, 7])
blended0 = ut.blend(x1, x2, amount=0)
blended1 = ut.blend(x1, x2, amount=1)
blended05 = ut.blend(x1, x2, amount=.5)
assert (blended0 == x1).all()
assert (blended1 == x2).all()
assert (blended05 == np.asarray([1.5, 2.5, 3.5, 4.5, 5.5, 6.5])).all()
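# Implied behaviour (a sketch inferred from the tests above, not from the
# utipy source): blend(x1, x2, amount) acts like a linear interpolation,
# i.e. (1 - amount) * x1 + amount * x2, so for the ndarray case one would
# expect, e.g.:
#
#     np.allclose(ut.blend(x1, x2, amount=.5),
#                 0.5 * np.asarray(x1) + 0.5 * np.asarray(x2))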
def test_blend_Series():
x1 = pd.Series([1, 2, 3, 4, 5, 6])
x2 = | pd.Series([2, 3, 4, 5, 6, 7]) | pandas.Series |
from dataProcessing import *
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import IsolationForest
import pandas as pd
import pickle
from RandomForestCounterFactual import *
def checkSamples(
datasetFileName,
unscaledFactualsFileName,
unscaledCounterFactualsFileName,
serializedClassifierFileName,
roundMace = False
):
reader = DatasetReader(datasetFileName,rangeFeasibilityForDiscreteFeatures=True)
# Classifier
clfRead, clfScaled = reader.readRandomForestFromPickleAndApplyMinMaxScaling(serializedClassifierFileName)
# Factuals
unscaledFactuals = pd.read_csv(unscaledFactualsFileName)
scaledFactuals = | pd.DataFrame() | pandas.DataFrame |
# import hashlib
# import random
# import json
import binascii
import json
import traceback
import uuid
import numpy as np
import datetime
import Crypto
import Crypto.Random
from Crypto.Random import get_random_bytes
# from Crypto.Hash import SHA
from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_OAEP
from Crypto.Cipher import AES
# import werkzeug
import requests
import os
import pandas as pd
import qrcode
# from pyzbar.pyzbar import decode
class Client:
def __init__(self,server_ip,password=None):
random = Crypto.Random.new().read
self._private_key = RSA.generate(2048, random)
self._public_key = self._private_key.publickey()
self.contacts_name_key={}
self.contacts_pkey_key = {}
self.contacts=pd.DataFrame(columns=['name','public_key'])
#self.groups = {}
self.server_ip=server_ip
self.request_session=requests.Session()
self._public_key_send=binascii.b2a_hex(self._public_key.exportKey()).decode('ascii')
self.chat_history=pd.DataFrame(columns=['sender_contact_id','receiver_contact_id','message','time','contact_id','content_address']) # contact:messages
self.aes_key= get_random_bytes(16)
self.aes_cipher = AES.new(self.aes_key, AES.MODE_EAX)
self.nonce = self.aes_cipher.nonce
self.download_files_path='secure_messenger_download'
if not os.path.isdir(self.download_files_path):
os.makedirs(self.download_files_path)
self.password=password
if password is not None:
if os.path.isfile('private_key.pem') and os.path.isfile('aes_key.pem') and os.path.isfile('nonce.pem'):
self.login_local(self.password)
try:
self.load_contacts()
self.load_chat_history()
                except Exception:
                    # No saved contacts or chat history yet; start empty.
                    pass
else:
self.dump_password_encrypted_private_key(password)
def dump_password_encrypted_private_key(self,password):
with open('private_key.pem','wb') as f:
dumped=self._private_key.export_key(passphrase=password)
f.write(dumped)
with open('aes_key.pem','wb') as f:
encryptor = PKCS1_OAEP.new(self._public_key)
encrypted = encryptor.encrypt(self.aes_key)
f.write(encrypted)
with open('nonce.pem','wb') as f:
encryptor = PKCS1_OAEP.new(self._public_key)
encrypted = encryptor.encrypt(self.nonce)
f.write(encrypted)
    def sign_up(self):
if self.server_ip is None:
return
# send public key to server
url=self.server_ip+'/login'
payload = { 'time': str(datetime.datetime.now().timestamp()), 'public_key': self._public_key_send}
resp=self.request_session.post(url, json=payload)
def login_local(self,password):
with open('private_key.pem', 'rb') as f:
key_enc=f.read()
self._private_key = RSA.importKey(key_enc,passphrase=password)
self._public_key = self._private_key.publickey()
self._public_key_send = binascii.b2a_hex(self._public_key.exportKey()).decode('ascii')
with open('aes_key.pem','rb') as f:
data=f.read()
decrypter = PKCS1_OAEP.new(self._private_key)
decrypted_dict = decrypter.decrypt(data)
self.aes_key = decrypted_dict
with open('nonce.pem', 'rb') as f:
data = f.read()
decrypter = PKCS1_OAEP.new(self._private_key)
decrypted_dict = decrypter.decrypt(data)
self.nonce = decrypted_dict
def dump_contacts(self):
# encryptor = PKCS1_OAEP.new(self._public_key)
cipher = AES.new(self.aes_key, AES.MODE_EAX, self.nonce)
ciphertext = cipher.encrypt(self.contacts.to_json(orient='records').encode())
# to_write=encryptor.encrypt(self.contacts.to_json(orient='records').encode())
with open('contacts.p', 'wb') as f:
f.write(ciphertext)
def load_contacts(self):
# decrypter = PKCS1_OAEP.new(self._private_key)
cipher = AES.new(self.aes_key, AES.MODE_EAX, self.nonce)
if os.path.isfile('contacts.p'):
with open('contacts.p', 'rb') as f:
# decrypted=decrypter.decrypt(f.read())
data = cipher.decrypt(f.read())
self.contacts=pd.read_json(data.decode(),orient='records')
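    # Round-trip sketch (illustrative only): dump_contacts / load_contacts are
    # symmetric AES-EAX operations sharing self.aes_key and self.nonce,
    # conceptually:
    #
    #     cipher = AES.new(key, AES.MODE_EAX, nonce)
    #     blob = cipher.encrypt(contacts_json_bytes)
    #     cipher = AES.new(key, AES.MODE_EAX, nonce)   # fresh cipher to decrypt
    #     assert cipher.decrypt(blob) == contacts_json_bytes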
def send_message(self,contact_id,msg=None,file=None,extension=None,file_path=None):
if self.server_ip is None:
print('no server')
return
key = get_random_bytes(16)
cipher = AES.new(key, AES.MODE_EAX)
nonce = cipher.nonce
if file is not None:
ciphertext_file = cipher.encrypt(file)
cipher = AES.new(key, AES.MODE_EAX, nonce)
else:
ciphertext_file=b''
if msg is not None:
ciphertext_msg = cipher.encrypt(msg.encode('ascii'))
else:
ciphertext_msg=''.encode('ascii')
receiver_public_key=RSA.importKey(binascii.a2b_hex(self.contacts.iloc[contact_id]['public_key']))
receiver_public_key_send = self.contacts.iloc[contact_id]['public_key']
encryptor = PKCS1_OAEP.new(receiver_public_key)
encrypted_nonce=binascii.b2a_hex(encryptor.encrypt(nonce)).decode('ascii')
encrypted_aes_key = binascii.b2a_hex(encryptor.encrypt(key)).decode('ascii')
encrypted_file = binascii.b2a_hex(ciphertext_file).decode('ascii')
encrypted_message = binascii.b2a_hex(ciphertext_msg).decode('ascii')
dtime=str(datetime.datetime.now().timestamp())
#send request
url=self.server_ip+'/textmessage'
payload={'message':encrypted_message,'encrypted_file':encrypted_file,'time':dtime,'receiver_public_key':receiver_public_key_send,'sender_public_key':self._public_key_send,'encrypted_nonce':encrypted_nonce,'encrypted_aes_key':encrypted_aes_key,'extension':extension}
print('payload',payload)
resp=self.request_session.post(url,json=payload)
if resp.json()['status']=='success':
self.update_chat_history('me',contact_id,msg,dtime,file_path)
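    # Hybrid-encryption sketch (a restatement of the method above, not extra
    # functionality): a fresh AES-EAX key/nonce encrypts the message body and
    # optional file, and only that small key/nonce pair is RSA-wrapped with
    # the receiver's public key:
    #
    #     key = get_random_bytes(16)
    #     cipher = AES.new(key, AES.MODE_EAX)
    #     body = cipher.encrypt(b"hello")
    #     wrapped_key = PKCS1_OAEP.new(receiver_public_key).encrypt(key)
    #
    # Only the holder of the matching private key can unwrap the AES key.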
def add_contact(self,public_key,name):
contact= | pd.Series({'public_key':public_key,'name':name}) | pandas.Series |
"""Run the file manually to find all angle data."""
import math
import numpy as np
import pandas as pd
from lilypadz.helper.constant import TOAD_HOP
def convert_xyz_to_kinematic(xyz_data: pd.DataFrame) -> pd.DataFrame:
"""Calculate three kinematic variables from the XYZ data.
:param xyz_data: xyz data of a hop of a specific toad.
:return: a pandas DataFrame that holds the kinematic variables.
"""
    # Collect per-row angle values in a list and build the DataFrame once at
    # the end; appending to a DataFrame inside the loop relies on
    # DataFrame.append, which is deprecated and removed in pandas 2.0.
    angle_rows = []
# Iterate over rows to find the angle data for each row.
for index, row in xyz_data.iterrows():
# If the row contains any empty data, make the entire angle row empty.
if row.isnull().values.any():
elbow_flex_ext = np.NaN
humeral_pro_ret = np.NaN
humeral_dep_ele = np.NaN
# Do the proper calculations.
else:
# Calculate Elbow Flexion/Extraction
seg_a = math.sqrt(
(row['pt4_X'] - row['pt5_X']) ** 2 +
(row['pt4_Y'] - row['pt5_Y']) ** 2 +
(row['pt4_Z'] - row['pt5_Z']) ** 2
)
seg_b = math.sqrt(
(row['pt5_X'] - row['pt6_X']) ** 2 +
(row['pt5_Y'] - row['pt6_Y']) ** 2 +
(row['pt5_Z'] - row['pt6_Z']) ** 2
)
seg_c = math.sqrt(
(row['pt4_X'] - row['pt6_X']) ** 2 +
(row['pt4_Y'] - row['pt6_Y']) ** 2 +
(row['pt4_Z'] - row['pt6_Z']) ** 2
)
try:
elbow_flex_ext = math.degrees(
math.acos(
(seg_c ** 2 - seg_a ** 2 - seg_b ** 2) /
(-2 * seg_a * seg_b)
)
)
except ZeroDivisionError:
elbow_flex_ext = 0
# Calculate Humeral Protraction/Retraction
pt5_x2 = row['pt5_X'] + (row['pt2_X'] - row['pt4_X'])
pt5_y2 = row['pt5_Y'] + (row['pt2_Y'] - row['pt4_Y'])
pt5_z2 = row['pt5_Z'] + (row['pt2_Z'] - row['pt4_Z'])
seg_d = math.sqrt(
(row['pt1_X'] - row['pt2_X']) ** 2 +
(row['pt1_Y'] - row['pt2_Y']) ** 2 +
(row['pt1_Z'] - row['pt2_Z']) ** 2
)
seg_e = math.sqrt(
(row['pt2_X'] - pt5_x2) ** 2 +
(row['pt2_Y'] - pt5_y2) ** 2 +
(row['pt2_Z'] - pt5_z2) ** 2
)
seg_f = math.sqrt(
(pt5_x2 - row['pt1_X']) ** 2 +
(pt5_y2 - row['pt1_Y']) ** 2 +
(pt5_z2 - row['pt1_Z']) ** 2
)
try:
humeral_pro_ret = 180 - math.degrees(
math.acos(
(seg_f ** 2 - seg_d ** 2 - seg_e ** 2) /
(-2 * seg_d * seg_e)
)
)
except ZeroDivisionError:
humeral_pro_ret = 0
# Calculate Humeral Depression/Elevation
pt5_x3 = row['pt5_X'] + (row['pt3_X'] - row['pt4_X'])
pt5_y3 = row['pt5_Y'] + (row['pt3_Y'] - row['pt4_Y'])
pt5_z3 = row['pt5_Z'] + (row['pt3_Z'] - row['pt4_Z'])
seg_g = math.sqrt(
(row['pt3_X'] - row['pt2_X']) ** 2 +
(row['pt3_Y'] - row['pt2_Y']) ** 2 +
(row['pt3_Z'] - row['pt2_Z']) ** 2
)
seg_h = math.sqrt(
(row['pt3_X'] - pt5_x3) ** 2 +
(row['pt3_Y'] - pt5_y3) ** 2 +
(row['pt3_Z'] - pt5_z3) ** 2
)
seg_i = math.sqrt(
(pt5_x3 - row['pt2_X']) ** 2 +
(pt5_y3 - row['pt2_Y']) ** 2 +
(pt5_z3 - row['pt2_Z']) ** 2
)
try:
humeral_dep_ele = 180 - math.degrees(
math.acos(
(seg_i ** 2 - seg_h ** 2 - seg_g ** 2) /
(-2 * seg_g * seg_h)
)
)
except ZeroDivisionError:
humeral_dep_ele = 0
        angle_rows.append(
            {'Elbow_Flex_Ext': elbow_flex_ext,
             'Humeral_Pro_Ret': humeral_pro_ret,
             'Humeral_Dep_Ele': humeral_dep_ele}
        )
    return pd.DataFrame(
        angle_rows,
        columns=['Elbow_Flex_Ext', 'Humeral_Pro_Ret', 'Humeral_Dep_Ele']
    )
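# Illustrative helper (an addition for clarity, not used by the pipeline):
# the elbow angle above is the law-of-cosines angle at pt5 between the
# pt4-pt5 and pt5-pt6 segments; for the right-angle configuration below it
# evaluates to 90 degrees.
def _elbow_angle_sketch():
    pt4, pt5, pt6 = (0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (1.0, 1.0, 0.0)
    seg_a = math.dist(pt4, pt5)  # segment pt4-pt5
    seg_b = math.dist(pt5, pt6)  # segment pt5-pt6
    seg_c = math.dist(pt4, pt6)  # distance opposite the joint
    return math.degrees(
        math.acos((seg_c ** 2 - seg_a ** 2 - seg_b ** 2) / (-2 * seg_a * seg_b))
    )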
def save_all_kinematic_data():
for name, hops in TOAD_HOP.items():
for hop in hops:
xyz_data = | pd.read_csv(f"{name}/{hop}/xyz.csv") | pandas.read_csv |
#
# Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from textwrap import dedent
from parameterized import parameterized
import numpy as np
from numpy import nan
import pandas as pd
from zipline._protocol import handle_non_market_minutes, BarData
from zipline.assets import Asset, Equity
from zipline.errors import (
HistoryInInitialize,
HistoryWindowStartsBeforeData,
)
from zipline.finance.asset_restrictions import NoRestrictions
from zipline.testing import (
create_minute_df_for_asset,
str_to_seconds,
MockDailyBarReader,
)
import zipline.testing.fixtures as zf
OHLC = ['open', 'high', 'low', 'close']
OHLCP = OHLC + ['price']
ALL_FIELDS = OHLCP + ['volume']
class WithHistory(zf.WithCreateBarData, zf.WithDataPortal):
TRADING_START_DT = TRADING_ENV_MIN_DATE = START_DATE = pd.Timestamp(
'2014-01-03',
tz='UTC',
)
TRADING_END_DT = END_DATE = pd.Timestamp('2016-01-29', tz='UTC')
SPLIT_ASSET_SID = 4
DIVIDEND_ASSET_SID = 5
MERGER_ASSET_SID = 6
HALF_DAY_TEST_ASSET_SID = 7
SHORT_ASSET_SID = 8
# asset1:
# - 2014-03-01 (rounds up to TRADING_START_DT) to 2016-01-29.
# - every minute/day.
# asset2:
# - 2015-01-05 to 2015-12-31
# - every minute/day.
# asset3:
# - 2015-01-05 to 2015-12-31
# - trades every 10 minutes
# SPLIT_ASSET:
# - 2015-01-04 to 2015-12-31
# - trades every minute
# - splits on 2015-01-05 and 2015-01-06
# DIVIDEND_ASSET:
# - 2015-01-04 to 2015-12-31
# - trades every minute
# - dividends on 2015-01-05 and 2015-01-06
# MERGER_ASSET
# - 2015-01-04 to 2015-12-31
# - trades every minute
# - merger on 2015-01-05 and 2015-01-06
@classmethod
def init_class_fixtures(cls):
super().init_class_fixtures()
cls.trading_days = cls.trading_calendar.sessions_in_range(
cls.TRADING_START_DT,
cls.TRADING_END_DT
)
cls.ASSET1 = cls.asset_finder.retrieve_asset(1)
cls.ASSET2 = cls.asset_finder.retrieve_asset(2)
cls.ASSET3 = cls.asset_finder.retrieve_asset(3)
cls.SPLIT_ASSET = cls.asset_finder.retrieve_asset(
cls.SPLIT_ASSET_SID,
)
cls.DIVIDEND_ASSET = cls.asset_finder.retrieve_asset(
cls.DIVIDEND_ASSET_SID,
)
cls.MERGER_ASSET = cls.asset_finder.retrieve_asset(
cls.MERGER_ASSET_SID,
)
cls.HALF_DAY_TEST_ASSET = cls.asset_finder.retrieve_asset(
cls.HALF_DAY_TEST_ASSET_SID,
)
cls.SHORT_ASSET = cls.asset_finder.retrieve_asset(
cls.SHORT_ASSET_SID,
)
@classmethod
def make_equity_info(cls):
jan_5_2015 = pd.Timestamp('2015-01-05', tz='UTC')
day_after_12312015 = pd.Timestamp('2016-01-04', tz='UTC')
return pd.DataFrame.from_dict(
{
1: {
'start_date': pd.Timestamp('2014-01-03', tz='UTC'),
'end_date': cls.TRADING_END_DT,
'symbol': 'ASSET1',
'exchange': "TEST",
},
2: {
'start_date': jan_5_2015,
'end_date': day_after_12312015,
'symbol': 'ASSET2',
'exchange': "TEST",
},
3: {
'start_date': jan_5_2015,
'end_date': day_after_12312015,
'symbol': 'ASSET3',
'exchange': "TEST",
},
cls.SPLIT_ASSET_SID: {
'start_date': jan_5_2015,
'end_date': day_after_12312015,
'symbol': 'SPLIT_ASSET',
'exchange': "TEST",
},
cls.DIVIDEND_ASSET_SID: {
'start_date': jan_5_2015,
'end_date': day_after_12312015,
'symbol': 'DIVIDEND_ASSET',
'exchange': "TEST",
},
cls.MERGER_ASSET_SID: {
'start_date': jan_5_2015,
'end_date': day_after_12312015,
'symbol': 'MERGER_ASSET',
'exchange': "TEST",
},
cls.HALF_DAY_TEST_ASSET_SID: {
'start_date': pd.Timestamp('2014-07-02', tz='UTC'),
'end_date': day_after_12312015,
'symbol': 'HALF_DAY_TEST_ASSET',
'exchange': "TEST",
},
cls.SHORT_ASSET_SID: {
'start_date': pd.Timestamp('2015-01-05', tz='UTC'),
'end_date': pd.Timestamp('2015-01-06', tz='UTC'),
'symbol': 'SHORT_ASSET',
'exchange': "TEST",
}
},
orient='index',
)
@classmethod
def make_splits_data(cls):
return pd.DataFrame([
{
'effective_date': str_to_seconds('2015-01-06'),
'ratio': 0.25,
'sid': cls.SPLIT_ASSET_SID,
},
{
'effective_date': str_to_seconds('2015-01-07'),
'ratio': 0.5,
'sid': cls.SPLIT_ASSET_SID,
},
])
@classmethod
def make_mergers_data(cls):
return pd.DataFrame([
{
'effective_date': str_to_seconds('2015-01-06'),
'ratio': 0.25,
'sid': cls.MERGER_ASSET_SID,
},
{
'effective_date': str_to_seconds('2015-01-07'),
'ratio': 0.5,
'sid': cls.MERGER_ASSET_SID,
}
])
@classmethod
def make_dividends_data(cls):
return pd.DataFrame([
{
# only care about ex date, the other dates don't matter here
'ex_date':
pd.Timestamp('2015-01-06', tz='UTC').to_datetime64(),
'record_date':
pd.Timestamp('2015-01-06', tz='UTC').to_datetime64(),
'declared_date':
pd.Timestamp('2015-01-06', tz='UTC').to_datetime64(),
'pay_date':
pd.Timestamp('2015-01-06', tz='UTC').to_datetime64(),
'amount': 2.0,
'sid': cls.DIVIDEND_ASSET_SID,
},
{
'ex_date':
pd.Timestamp('2015-01-07', tz='UTC').to_datetime64(),
'record_date':
pd.Timestamp('2015-01-07', tz='UTC').to_datetime64(),
'declared_date':
pd.Timestamp('2015-01-07', tz='UTC').to_datetime64(),
'pay_date':
pd.Timestamp('2015-01-07', tz='UTC').to_datetime64(),
'amount': 4.0,
'sid': cls.DIVIDEND_ASSET_SID,
}],
columns=[
'ex_date',
'record_date',
'declared_date',
'pay_date',
'amount',
'sid'],
)
@classmethod
def make_adjustment_writer_equity_daily_bar_reader(cls):
return MockDailyBarReader(
dates=cls.trading_calendar.sessions_in_range(
cls.TRADING_START_DT,
cls.TRADING_END_DT,
),
)
def verify_regular_dt(self, idx, dt, mode, fields=None, assets=None):
if mode == 'daily':
freq = '1d'
else:
freq = '1m'
cal = self.trading_calendar
equity_cal = self.trading_calendars[Equity]
def reindex_to_primary_calendar(a, field):
"""
Reindex an array of prices from a window on the NYSE
calendar by the window on the primary calendar with the same
dt and window size.
"""
if mode == 'daily':
dts = cal.sessions_window(dt, -9)
# `dt` may not be a session on the equity calendar, so
# find the next valid session.
equity_sess = equity_cal.minute_to_session_label(dt)
equity_dts = equity_cal.sessions_window(equity_sess, -9)
elif mode == 'minute':
dts = cal.minutes_window(dt, -10)
equity_dts = equity_cal.minutes_window(dt, -10)
output = pd.Series(
index=equity_dts,
data=a,
).reindex(dts)
# Fill after reindexing, to ensure we don't forward fill
# with values that are being dropped.
if field == 'volume':
return output.fillna(0)
elif field == 'price':
return output.fillna(method='ffill')
else:
return output
fields = fields if fields is not None else ALL_FIELDS
assets = assets if assets is not None else [self.ASSET2, self.ASSET3]
bar_data = self.create_bardata(
simulation_dt_func=lambda: dt,
)
check_internal_consistency(
bar_data, assets, fields, 10, freq
)
for field in fields:
for asset in assets:
asset_series = bar_data.history(asset, field, 10, freq)
base = MINUTE_FIELD_INFO[field] + 2
if idx < 9:
missing_count = 9 - idx
present_count = 9 - missing_count
if field in OHLCP:
if asset == self.ASSET2:
# asset2 should have some leading nans
np.testing.assert_array_equal(
np.full(missing_count, np.nan),
asset_series[0:missing_count]
)
# asset2 should also have some real values
np.testing.assert_array_equal(
np.array(range(base,
base + present_count + 1)),
asset_series[(9 - present_count):]
)
if asset == self.ASSET3:
# asset3 should be NaN the entire time
np.testing.assert_array_equal(
np.full(10, np.nan),
asset_series
)
elif field == 'volume':
if asset == self.ASSET2:
# asset2 should have some zeros (instead of nans)
np.testing.assert_array_equal(
np.zeros(missing_count),
asset_series[0:missing_count]
)
# and some real values
np.testing.assert_array_equal(
np.array(
range(base, base + present_count + 1)
) * 100,
asset_series[(9 - present_count):]
)
if asset == self.ASSET3:
# asset3 is all zeros, no volume yet
np.testing.assert_array_equal(
np.zeros(10),
asset_series
)
else:
# asset3 should have data every 10 minutes
# construct an array full of nans, put something in the
# right slot, and test for comparison
position_from_end = ((idx + 1) % 10) + 1
# asset3's baseline data is 9 NaNs, then 11, then 9 NaNs,
# then 21, etc. for idx 9 to 19, value_for_asset3 should
# be a baseline of 11 (then adjusted for the individual
# field), thus the rounding down to the nearest 10.
value_for_asset3 = (((idx + 1) // 10) * 10) + \
MINUTE_FIELD_INFO[field] + 1
if field in OHLC:
asset3_answer_key = np.full(10, np.nan)
asset3_answer_key[-position_from_end] = \
value_for_asset3
asset3_answer_key = reindex_to_primary_calendar(
asset3_answer_key,
field,
)
if asset == self.ASSET2:
np.testing.assert_array_equal(
reindex_to_primary_calendar(
np.array(
range(base + idx - 9, base + idx + 1)
),
field,
),
asset_series
)
if asset == self.ASSET3:
np.testing.assert_array_equal(
asset3_answer_key,
asset_series
)
elif field == 'volume':
asset3_answer_key = np.zeros(10)
asset3_answer_key[-position_from_end] = \
value_for_asset3 * 100
asset3_answer_key = reindex_to_primary_calendar(
asset3_answer_key,
field,
)
if asset == self.ASSET2:
np.testing.assert_array_equal(
reindex_to_primary_calendar(
np.array(
range(base + idx - 9, base + idx + 1)
) * 100,
field,
),
asset_series
)
if asset == self.ASSET3:
np.testing.assert_array_equal(
asset3_answer_key,
asset_series
)
elif field == 'price':
# price is always forward filled
# asset2 has prices every minute, so it's easy
if asset == self.ASSET2:
# at idx 9, the data is 2 to 11
np.testing.assert_array_equal(
reindex_to_primary_calendar(
range(idx - 7, idx + 3),
field=field,
),
asset_series
)
if asset == self.ASSET3:
# Second part begins on the session after
# `position_from_end` on the NYSE calendar.
second_begin = (
dt - equity_cal.day * (position_from_end - 1)
)
# First part goes up until the start of the
# second part, because we forward-fill.
first_end = second_begin - cal.day
first_part = asset_series[:first_end]
second_part = asset_series[second_begin:]
decile_count = ((idx + 1) // 10)
# in our test data, asset3 prices will be nine
# NaNs, then ten 11s, ten 21s, ten 31s...
if len(second_part) >= 10:
np.testing.assert_array_equal(
np.full(len(first_part), np.nan),
first_part
)
elif decile_count == 1:
np.testing.assert_array_equal(
np.full(len(first_part), np.nan),
first_part
)
np.testing.assert_array_equal(
np.array([11] * len(second_part)),
second_part
)
else:
np.testing.assert_array_equal(
np.array([decile_count * 10 - 9] *
len(first_part)),
first_part
)
np.testing.assert_array_equal(
np.array([decile_count * 10 + 1] *
len(second_part)),
second_part
)
def check_internal_consistency(bar_data, assets, fields, bar_count, freq):
if isinstance(assets, Asset):
asset_list = [assets]
else:
asset_list = assets
if isinstance(fields, str):
field_list = [fields]
else:
field_list = fields
multi_field_dict = {
asset: bar_data.history(asset, field_list, bar_count, freq)
for asset in asset_list
}
multi_asset_dict = {
field: bar_data.history(asset_list, field, bar_count, freq)
for field in fields
}
panel = bar_data.history(asset_list, field_list, bar_count, freq)
for field in field_list:
# make sure all the different query forms are internally
# consistent
for asset in asset_list:
series = bar_data.history(asset, field, bar_count, freq)
np.testing.assert_array_equal(
series,
multi_asset_dict[field][asset]
)
np.testing.assert_array_equal(
series,
multi_field_dict[asset][field]
)
np.testing.assert_array_equal(
series,
panel[field][asset]
)
# each minute's OHLCV data has a consistent offset for each field.
# for example, the open is always 1 higher than the close, the high
# is always 2 higher than the close, etc.
MINUTE_FIELD_INFO = {
'open': 1,
'high': 2,
'low': -1,
'close': 0,
'price': 0,
'volume': 0, # unused, later we'll multiply by 100
}
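# For example (illustrative reading of the offsets above): if a bar's close is
# 768, the synthetic data carries open=769, high=770, low=767, price=768 and
# volume=768 * 100 = 76800 -- exactly the pattern in the ASCII table inside
# test_minute_after_asset_stopped below.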
class MinuteEquityHistoryTestCase(WithHistory,
zf.WithMakeAlgo,
zf.ZiplineTestCase):
EQUITY_DAILY_BAR_SOURCE_FROM_MINUTE = True
DATA_PORTAL_FIRST_TRADING_DAY = zf.alias('TRADING_START_DT')
@classmethod
def make_equity_minute_bar_data(cls):
equities_cal = cls.trading_calendars[Equity]
data = {}
sids = {2, 5, cls.SHORT_ASSET_SID, cls.HALF_DAY_TEST_ASSET_SID}
for sid in sids:
asset = cls.asset_finder.retrieve_asset(sid)
data[sid] = create_minute_df_for_asset(
equities_cal,
asset.start_date,
asset.end_date,
start_val=2,
)
data[1] = create_minute_df_for_asset(
equities_cal,
pd.Timestamp('2014-01-03', tz='utc'),
pd.Timestamp('2016-01-29', tz='utc'),
start_val=2,
)
asset2 = cls.asset_finder.retrieve_asset(2)
data[asset2.sid] = create_minute_df_for_asset(
equities_cal,
asset2.start_date,
equities_cal.previous_session_label(asset2.end_date),
start_val=2,
minute_blacklist=[
pd.Timestamp('2015-01-08 14:31', tz='UTC'),
pd.Timestamp('2015-01-08 21:00', tz='UTC'),
],
)
# Start values are crafted so that the thousands place are equal when
# adjustments are applied correctly.
# The splits and mergers are defined as 4:1 then 2:1 ratios, so the
# prices approximate that adjustment by quartering and then halving
# the thousands place.
data[cls.MERGER_ASSET_SID] = data[cls.SPLIT_ASSET_SID] = pd.concat((
create_minute_df_for_asset(
equities_cal,
pd.Timestamp('2015-01-05', tz='UTC'),
pd.Timestamp('2015-01-05', tz='UTC'),
start_val=8000),
create_minute_df_for_asset(
equities_cal,
pd.Timestamp('2015-01-06', tz='UTC'),
pd.Timestamp('2015-01-06', tz='UTC'),
start_val=2000),
create_minute_df_for_asset(
equities_cal,
pd.Timestamp('2015-01-07', tz='UTC'),
pd.Timestamp('2015-01-07', tz='UTC'),
start_val=1000),
create_minute_df_for_asset(
equities_cal,
pd.Timestamp('2015-01-08', tz='UTC'),
pd.Timestamp('2015-01-08', tz='UTC'),
start_val=1000)
))
asset3 = cls.asset_finder.retrieve_asset(3)
data[3] = create_minute_df_for_asset(
equities_cal,
asset3.start_date,
asset3.end_date,
start_val=2,
interval=10,
)
return data.items()
def test_history_in_initialize(self):
algo_text = dedent(
"""\
from zipline.api import history
def initialize(context):
history([1], 10, '1d', 'price')
def handle_data(context, data):
pass
"""
)
algo = self.make_algo(script=algo_text)
with self.assertRaises(HistoryInInitialize):
algo.run()
def test_negative_bar_count(self):
"""
Negative bar counts leak future information.
"""
with self.assertRaisesRegex(
ValueError,
"bar_count must be >= 1, but got -1"
):
self.data_portal.get_history_window(
[self.ASSET1],
pd.Timestamp('2015-01-07 14:35', tz='UTC'),
-1,
'1d',
'close',
'minute',
)
def test_daily_splits_and_mergers(self):
# self.SPLIT_ASSET and self.MERGER_ASSET had splits/mergers
# on 1/6 and 1/7
jan5 = pd.Timestamp('2015-01-05', tz='UTC')
for asset in [self.SPLIT_ASSET, self.MERGER_ASSET]:
# before any of the adjustments, 1/4 and 1/5
window1 = self.data_portal.get_history_window(
[asset],
self.trading_calendar.open_and_close_for_session(jan5)[1],
2,
'1d',
'close',
'minute',
)[asset]
np.testing.assert_array_equal(np.array([np.nan, 8389]), window1)
# straddling the first event
window2 = self.data_portal.get_history_window(
[asset],
pd.Timestamp('2015-01-06 14:35', tz='UTC'),
2,
'1d',
'close',
'minute',
)[asset]
# Value from 1/5 should be quartered
np.testing.assert_array_equal(
[2097.25,
# Split occurs. The value of the thousands place should
# match.
2004],
window2
)
# straddling both events!
window3 = self.data_portal.get_history_window(
[asset],
pd.Timestamp('2015-01-07 14:35', tz='UTC'),
3,
'1d',
'close',
'minute',
)[asset]
np.testing.assert_array_equal(
[1048.625, 1194.50, 1004.0],
window3
)
# after last event
window4 = self.data_portal.get_history_window(
[asset],
pd.Timestamp('2015-01-08 14:40', tz='UTC'),
2,
'1d',
'close',
'minute',
)[asset]
# should not be adjusted
np.testing.assert_array_equal([1389, 1009], window4)
def test_daily_dividends(self):
# self.DIVIDEND_ASSET had dividends on 1/6 and 1/7
jan5 = pd.Timestamp('2015-01-05', tz='UTC')
asset = self.DIVIDEND_ASSET
# before any of the dividends
window1 = self.data_portal.get_history_window(
[asset],
self.trading_calendar.session_close(jan5),
2,
'1d',
'close',
'minute',
)[asset]
np.testing.assert_array_equal(np.array([nan, 391]), window1)
# straddling the first event
window2 = self.data_portal.get_history_window(
[asset],
pd.Timestamp('2015-01-06 14:35', tz='UTC'),
2,
'1d',
'close',
'minute',
)[asset]
np.testing.assert_array_equal(
[383.18, # 391 (last close) * 0.98 (first div)
# Dividend occurs prior.
396],
window2
)
# straddling both events!
window3 = self.data_portal.get_history_window(
[asset],
pd.Timestamp('2015-01-07 14:35', tz='UTC'),
3,
'1d',
'close',
'minute',
)[asset]
np.testing.assert_array_equal(
[367.853, # 391 (last close) * 0.98 * 0.96 (both)
749.76, # 781 (last_close) * 0.96 (second div)
786], # no adjustment
window3
)
# after last event
window4 = self.data_portal.get_history_window(
[asset],
pd.Timestamp('2015-01-08 14:40', tz='UTC'),
2,
'1d',
'close',
'minute',
)[asset]
# should not be adjusted, should be 787 to 791
np.testing.assert_array_equal([1171, 1181], window4)
def test_minute_before_assets_trading(self):
# since asset2 and asset3 both started trading on 1/5/2015, let's do
# some history windows that are completely before that
minutes = self.trading_calendar.minutes_for_session(
self.trading_calendar.previous_session_label(pd.Timestamp(
'2015-01-05', tz='UTC'
))
)[0:60]
for idx, minute in enumerate(minutes):
bar_data = self.create_bardata(
lambda: minute,
)
check_internal_consistency(
bar_data, [self.ASSET2, self.ASSET3], ALL_FIELDS, 10, '1m'
)
for field in ALL_FIELDS:
# OHLCP should be NaN
# Volume should be 0
asset2_series = bar_data.history(self.ASSET2, field, 10, '1m')
asset3_series = bar_data.history(self.ASSET3, field, 10, '1m')
if field == 'volume':
np.testing.assert_array_equal(np.zeros(10), asset2_series)
np.testing.assert_array_equal(np.zeros(10), asset3_series)
else:
np.testing.assert_array_equal(
np.full(10, np.nan),
asset2_series
)
np.testing.assert_array_equal(
np.full(10, np.nan),
asset3_series
)
@parameterized.expand([
('open_sid_2', 'open', 2),
('high_sid_2', 'high', 2),
('low_sid_2', 'low', 2),
('close_sid_2', 'close', 2),
('volume_sid_2', 'volume', 2),
('open_sid_3', 'open', 3),
('high_sid_3', 'high', 3),
('low_sid_3', 'low', 3),
('close_sid_3', 'close', 3),
('volume_sid_3', 'volume', 3),
])
def test_minute_regular(self, name, field, sid):
# asset2 and asset3 both started on 1/5/2015, but asset3 trades every
# 10 minutes
asset = self.asset_finder.retrieve_asset(sid)
# Check the first hour of equities trading.
minutes = self.trading_calendars[Equity].minutes_for_session(
pd.Timestamp('2015-01-05', tz='UTC')
)[0:60]
for idx, minute in enumerate(minutes):
self.verify_regular_dt(idx, minute, 'minute',
assets=[asset],
fields=[field])
def test_minute_sunday_midnight(self):
# Most trading calendars aren't open at midnight on Sunday.
sunday_midnight = pd.Timestamp('2015-01-09', tz='UTC')
# Find the closest prior minute when the trading calendar was
# open (note that if the calendar is open at `sunday_midnight`,
# this will be `sunday_midnight`).
trading_minutes = self.trading_calendar.all_minutes
last_minute = trading_minutes[trading_minutes <= sunday_midnight][-1]
sunday_midnight_bar_data = self.create_bardata(lambda: sunday_midnight)
last_minute_bar_data = self.create_bardata(lambda: last_minute)
# Ensure that we get the same results at midnight on Sunday as
# the last open minute.
with handle_non_market_minutes(sunday_midnight_bar_data):
for field in ALL_FIELDS:
np.testing.assert_array_equal(
sunday_midnight_bar_data.history(
self.ASSET2,
field,
30,
'1m',
),
last_minute_bar_data.history(self.ASSET2, field, 30, '1m')
)
def test_minute_after_asset_stopped(self):
# SHORT_ASSET's last day was 2015-01-06
# get some history windows that straddle the end
minutes = self.trading_calendars[Equity].minutes_for_session(
pd.Timestamp('2015-01-07', tz='UTC')
)[0:60]
for idx, minute in enumerate(minutes):
bar_data = self.create_bardata(
lambda: minute
)
check_internal_consistency(
bar_data, self.SHORT_ASSET, ALL_FIELDS, 30, '1m'
)
# Reset data portal because it has advanced past next test date.
data_portal = self.make_data_portal()
# close high low open price volume
# 2015-01-06 20:47:00+00:00 768 770 767 769 768 76800
# 2015-01-06 20:48:00+00:00 769 771 768 770 769 76900
# 2015-01-06 20:49:00+00:00 770 772 769 771 770 77000
# 2015-01-06 20:50:00+00:00 771 773 770 772 771 77100
# 2015-01-06 20:51:00+00:00 772 774 771 773 772 77200
# 2015-01-06 20:52:00+00:00 773 775 772 774 773 77300
# 2015-01-06 20:53:00+00:00 774 776 773 775 774 77400
# 2015-01-06 20:54:00+00:00 775 777 774 776 775 77500
# 2015-01-06 20:55:00+00:00 776 778 775 777 776 77600
# 2015-01-06 20:56:00+00:00 777 779 776 778 777 77700
# 2015-01-06 20:57:00+00:00 778 780 777 779 778 77800
# 2015-01-06 20:58:00+00:00 779 781 778 780 779 77900
# 2015-01-06 20:59:00+00:00 780 782 779 781 780 78000
# 2015-01-06 21:00:00+00:00 781 783 780 782 781 78100
# 2015-01-07 14:31:00+00:00 NaN NaN NaN NaN NaN 0
# 2015-01-07 14:32:00+00:00 NaN NaN NaN NaN NaN 0
# 2015-01-07 14:33:00+00:00 NaN NaN NaN NaN NaN 0
# 2015-01-07 14:34:00+00:00 NaN NaN NaN NaN NaN 0
# 2015-01-07 14:35:00+00:00 NaN NaN NaN NaN NaN 0
# 2015-01-07 14:36:00+00:00 NaN NaN NaN NaN NaN 0
# 2015-01-07 14:37:00+00:00 NaN NaN NaN NaN NaN 0
# 2015-01-07 14:38:00+00:00 NaN NaN NaN NaN NaN 0
# 2015-01-07 14:39:00+00:00 NaN NaN NaN NaN NaN 0
# 2015-01-07 14:40:00+00:00 NaN NaN NaN NaN NaN 0
# 2015-01-07 14:41:00+00:00 NaN NaN NaN NaN NaN 0
# 2015-01-07 14:42:00+00:00 NaN NaN NaN NaN NaN 0
# 2015-01-07 14:43:00+00:00 NaN NaN NaN NaN NaN 0
# 2015-01-07 14:44:00+00:00 NaN NaN NaN NaN NaN 0
# 2015-01-07 14:45:00+00:00 NaN NaN NaN NaN NaN 0
# 2015-01-07 14:46:00+00:00 NaN NaN NaN NaN NaN 0
# choose a window that contains the last minute of the asset
window_start = pd.Timestamp('2015-01-06 20:47', tz='UTC')
window_end = pd.Timestamp('2015-01-07 14:46', tz='UTC')
bar_data = BarData(
data_portal=data_portal,
simulation_dt_func=lambda: minutes[15],
data_frequency='minute',
restrictions=NoRestrictions(),
trading_calendar=self.trading_calendar,
)
bar_count = len(
self.trading_calendar.minutes_in_range(window_start, window_end)
)
window = bar_data.history(
self.SHORT_ASSET,
ALL_FIELDS,
bar_count,
'1m',
)
# Window should start with 14 values and end with 16 NaNs/0s.
for field in ALL_FIELDS:
if field == 'volume':
np.testing.assert_array_equal(
range(76800, 78101, 100),
window['volume'][0:14]
)
np.testing.assert_array_equal(
np.zeros(16),
window['volume'][-16:]
)
else:
np.testing.assert_array_equal(
np.array(range(768, 782)) + MINUTE_FIELD_INFO[field],
window[field][0:14]
)
np.testing.assert_array_equal(
np.full(16, np.nan),
window[field][-16:]
)
# now do a smaller window that is entirely contained after the asset
# ends
window = bar_data.history(self.SHORT_ASSET, ALL_FIELDS, 5, '1m')
for field in ALL_FIELDS:
if field == 'volume':
np.testing.assert_array_equal(np.zeros(5), window['volume'])
else:
np.testing.assert_array_equal(np.full(5, np.nan),
window[field])
def test_minute_splits_and_mergers(self):
# self.SPLIT_ASSET and self.MERGER_ASSET had splits/mergers
# on 1/6 and 1/7
jan5 = pd.Timestamp('2015-01-05', tz='UTC')
# the assets' close column starts at 2 on the first minute of
# 1/5, then goes up one per minute forever
for asset in [self.SPLIT_ASSET, self.MERGER_ASSET]:
# before any of the adjustments, last 10 minutes of jan 5
equity_cal = self.trading_calendars[Equity]
window1 = self.data_portal.get_history_window(
[asset],
equity_cal.open_and_close_for_session(jan5)[1],
10,
'1m',
'close',
'minute',
)[asset]
np.testing.assert_array_equal(
np.array(range(8380, 8390)), window1)
# straddling the first event - begins with the last 5 equity
# minutes on 2015-01-05, ends with the first 5 on
# 2015-01-06.
window2_start = pd.Timestamp('2015-01-05 20:56', tz='UTC')
window2_end = pd.Timestamp('2015-01-06 14:35', tz='UTC')
window2_count = len(self.trading_calendar.minutes_in_range(
window2_start,
window2_end,
))
window2 = self.data_portal.get_history_window(
[asset],
pd.Timestamp('2015-01-06 14:35', tz='UTC'),
window2_count,
'1m',
'close',
'minute',
)[asset]
# five minutes from 1/5 should be halved
np.testing.assert_array_equal(
[2096.25,
2096.5,
2096.75,
2097,
2097.25],
window2[:5],
)
# Split occurs. The value of the thousands place should
# match.
np.testing.assert_array_equal(
[2000,
2001,
2002,
2003,
2004],
window2[-5:],
)
# straddling both events! on the equities calendar this is 5
# minutes of 1/7, 390 of 1/6, and 5 minutes of 1/5.
window3_start = pd.Timestamp('2015-01-05 20:56', tz='UTC')
window3_end = | pd.Timestamp('2015-01-07 14:35', tz='UTC') | pandas.Timestamp |
# -*- coding: utf-8 -*-
from typing import Optional, Union
import pandas as pd
from mando import command
from tstoolbox import tsutils
try:
from typing import Literal
except ImportError:
from typing_extensions import Literal
try:
from mando.rst_text_formatter import RSTHelpFormatter as HelpFormatter
except ImportError:
from argparse import RawTextHelpFormatter as HelpFormatter
@command("cdec", formatter_class=HelpFormatter, doctype="numpy")
@tsutils.doc(tsutils.docstrings)
def cdec_cli(
station_id, dur_code=None, sensor_num=None, start_date=None, end_date=None
):
r"""Access data from the 'California Department of Water Resources'.
The web site is called the 'California Data Exchange Center'.
California Department of Water Resources: http://www.water.ca.gov/
California Data Exchange Center: http://cdec.water.ca.gov
Downloads data for a set of CDEC station and sensor ids. If either is not
provided, all available data will be downloaded.
Parameters
----------
station_id: str
[optional, default is None]
        Each string is a CDEC station ID and consists of three capital
        letters.
sensor_num: integer, comma separated integers or ``None``
[optional, default is None]
If ``None`` will get all sensors at `station_id`.
SELECTED CDEC SENSOR NUMBERS (these are not available for all
sites):
+------------+-------------------------------------------+
| sensor_num | Description |
+============+===========================================+
| 1 | river stage [ft] |
+------------+-------------------------------------------+
| 2 | precipitation accumulated [in] |
+------------+-------------------------------------------+
| 3 | SWE [in] |
+------------+-------------------------------------------+
| 4 | air temperature [F] |
+------------+-------------------------------------------+
| 5 | EC [ms/cm] |
+------------+-------------------------------------------+
| 6 | reservoir elevation [ft] |
+------------+-------------------------------------------+
| 7 | reservoir scheduled release [cfs] |
+------------+-------------------------------------------+
| 8 | full natural flow [cfs] |
+------------+-------------------------------------------+
| 15 | reservoir storage [af] |
+------------+-------------------------------------------+
| 20 | flow -- river discharge [cfs] |
+------------+-------------------------------------------+
| 22 | reservoir storage change [af] |
+------------+-------------------------------------------+
| 23 | reservoir outflow [cfs] |
+------------+-------------------------------------------+
| 24 | Evapotranspiration [in] |
+------------+-------------------------------------------+
| 25 | water temperature [F] |
+------------+-------------------------------------------+
| 27 | water turbidity [ntu] |
+------------+-------------------------------------------+
| 28 | chlorophyll [ug/l] |
+------------+-------------------------------------------+
| 41 | flow -- mean daily [cfs] |
+------------+-------------------------------------------+
| 45 | precipitation incremental [in] |
+------------+-------------------------------------------+
| 46 | runoff volume [af] |
+------------+-------------------------------------------+
| 61 | water dissolved oxygen [mg/l] |
+------------+-------------------------------------------+
| 62 | water pH value [pH] |
+------------+-------------------------------------------+
| 64 | pan evaporation (incremental) [in] |
+------------+-------------------------------------------+
| 65 | full natural flow [af] |
+------------+-------------------------------------------+
| 66 | flow -- monthly volume [af] |
+------------+-------------------------------------------+
| 67 | accretions (estimated) [af] |
+------------+-------------------------------------------+
| 71 | spillway discharge [cfs] |
+------------+-------------------------------------------+
| 74 | lake evaporation (computed) [cfs] |
+------------+-------------------------------------------+
| 76 | reservoir inflow [cfs] |
+------------+-------------------------------------------+
| 85 | control regulating discharge [cfs] |
+------------+-------------------------------------------+
| 94 | top conservation storage (reservoir) [af] |
+------------+-------------------------------------------+
| 100 | water EC [us/cm] |
+------------+-------------------------------------------+
dur_code: str, comma separated strings, or ``None``
[optional, default is None]
Possible values are 'E', 'H', 'D', and 'M' but not
all of these time resolutions are available at every station.
+----------+-------------+
| dur_code | Description |
+==========+=============+
| E | event |
+----------+-------------+
| H | hourly |
+----------+-------------+
| D | daily |
+----------+-------------+
| M | monthly |
+----------+-------------+
{start_date}
{end_date}
"""
tsutils._printiso(
cdec(
station_id,
dur_code=dur_code,
sensor_num=sensor_num,
start_date=start_date,
end_date=end_date,
)
)
DEFAULT_START_DATE = "01/01/1901"
DEFAULT_END_DATE = "Now"
def get_stations():
"""Fetch information on all CDEC sites.
Returns
-------
df : pandas DataFrame
a pandas DataFrame (indexed on site id) with station information.
"""
# I haven't found a better list of stations, seems pretty janky
# to just have them in a file, and not sure if/when it is updated.
url = "http://cdec.water.ca.gov/misc/all_stations.csv"
# the csv is malformed, so some rows think there are 7 fields
col_names = ["id", "meta_url", "name", "num", "lat", "lon", "junk"]
df = pd.read_csv(url, names=col_names, header=None, quotechar="'", index_col=0)
return df
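# Minimal usage sketch (assumes network access to cdec.water.ca.gov; the
# station id is only an example):
#
#     stations = get_stations()
#     stations.loc["SHA", "name"]   # look a station up by its three-letter id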
def get_sensors(sensor_id=None):
"""
Get a list of sensor ids as a DataFrame indexed on sensor number.
Can be limited by a list of numbers.
Parameters
----------
    sensor_id : integer, iterable of integers, or ``None``
Returns
-------
df : pandas DataFrame
        a pandas DataFrame of sensor metadata, indexed on sensor number
"""
url = "http://cdec.water.ca.gov/misc/senslist.html"
df = pd.read_html(url, header=0)[0]
df.set_index("Sensor No")
if sensor_id is None:
return df
return df.ix[sensor_id]
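# Usage sketch (illustrative): with no argument the full sensor table is
# returned; a list of sensor numbers narrows it down, e.g.
#
#     sensors = get_sensors()        # every sensor CDEC lists
#     subset = get_sensors([6, 15])  # reservoir elevation and storage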
def get_station_sensors(station_ids=None, sensor_ids=None, resolutions=None):
"""Get available sensors for the given stations.
Get available sensors, sensor ids and time resolutions. If no station ids
are provided, all available stations will be used (this is not recommended,
and will probably take a really long time).
The list can be limited by a list of sensor numbers, or time resolutions
if you already know what you want. If none of the provided sensors or
resolutions are available, an empty DataFrame will be returned for that
station.
Parameters
----------
station_ids : iterable of strings or ``None``
sensor_ids : iterable of integers or ``None``
check out or use the ``get_sensors()`` function to see a list of
available sensor numbers
resolutions : iterable of strings or ``None``
Possible values are 'event', 'hourly', 'daily', and 'monthly' but not
all of these time resolutions are available at every station.
Returns
-------
dict : a python dict
a python dict with site codes as keys with values containing pandas
DataFrames of available sensor numbers and metadata.
"""
station_sensors = {}
unit_conv = {
"INCHES": "in",
"AF": "acre feet",
"CFS": "cfs",
"FEET": "ft",
}
if station_ids is None:
station_ids = get_stations().index
for station_id in station_ids:
url = "http://cdec.water.ca.gov/dynamicapp/staMeta?station_id={}".format(
station_id
)
sensor_list = pd.read_html(url)[1]
sensor_list.columns = [
"sensor_description",
"sensor_number",
"duration",
"plot",
"data_collection",
"data_available",
]
v = list(sensor_list["sensor_description"].to_dict().values())
split = [i.split(",") for i in v]
var_names = ["_".join(x[:-1]).strip() for x in split]
units = [x[-1][1:] for x in split]
units = [unit_conv.get(i, i) for i in units]
var_names = [":".join([i, j]) for i, j in zip(var_names, units)]
var_resolution = [x[1:-1] for x in sensor_list["duration"]]
sensor_list["resolution"] = var_resolution
sensor_list[
"variable"
] = var_names # [x + y for x, y in zip(var_names, var_resolution)]
station_sensors[station_id] = _limit_sensor_list(
sensor_list, sensor_ids, resolutions
)
return station_sensors
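# Usage sketch (illustrative; the station id is only an example, and walking
# every station by passing station_ids=None is slow):
#
#     by_station = get_station_sensors(station_ids=["SHA"],
#                                      resolutions=["daily"])
#     by_station["SHA"][["sensor_number", "variable", "resolution"]]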
def get_data(station_ids=None, sensor_ids=None, resolutions=None, start=None, end=None):
"""Download data for a set of CDEC station and sensor ids.
If either is not provided, all available data will be downloaded. Be really
careful with choosing hourly resolution as the data sets are big, and
CDEC's servers are slow as molasses in winter.
Parameters
----------
station_ids : iterable of strings or ``None``
sensor_ids : iterable of integers or ``None``
check out or use the ``get_sensors()`` function to see a list of
available sensor numbers
resolutions : iterable of strings or ``None``
Possible values are 'event', 'hourly', 'daily', and 'monthly' but not
all of these time resolutions are available at every station.
Returns
-------
dict : a python dict
a python dict with site codes as keys. Values will be nested dicts
containing all of the sensor/resolution combinations.
"""
if start is None:
start_date = pd.Timestamp(DEFAULT_START_DATE).date()
else:
start_date = pd.Timestamp(start).date()
if end is None:
end_date = | pd.Timestamp(DEFAULT_END_DATE) | pandas.Timestamp |
from __future__ import absolute_import, division, print_function
import pytest
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas import DataFrame, Series
from string import ascii_lowercase
from blaze.compute.core import compute
from blaze import dshape, discover, transform
from blaze.expr import symbol, join, by, summary, distinct, shape
from blaze.expr import (merge, exp, mean, count, nunique, sum, min, max, any,
var, std, concat)
from blaze.compatibility import builtins, xfail, assert_series_equal
t = symbol('t', 'var * {name: string, amount: int, id: int}')
nt = symbol('t', 'var * {name: ?string, amount: float64, id: int}')
df = DataFrame([['Alice', 100, 1],
['Bob', 200, 2],
['Alice', 50, 3]], columns=['name', 'amount', 'id'])
ndf = DataFrame([['Alice', 100.0, 1],
['Bob', np.nan, 2],
[np.nan, 50.0, 3]], columns=['name', 'amount', 'id'])
tbig = symbol('tbig',
'var * {name: string, sex: string[1], amount: int, id: int}')
dfbig = DataFrame([['Alice', 'F', 100, 1],
['Alice', 'F', 100, 3],
['Drew', 'F', 100, 4],
['Drew', 'M', 100, 5],
['Drew', 'M', 200, 5]],
columns=['name', 'sex', 'amount', 'id'])
def test_series_columnwise():
s = Series([1, 2, 3], name='a')
t = symbol('t', 'var * {a: int64}')
result = compute(t.a + 1, s)
assert_series_equal(s + 1, result)
def test_symbol():
tm.assert_frame_equal(compute(t, df), df)
def test_projection():
tm.assert_frame_equal(compute(t[['name', 'id']], df),
df[['name', 'id']])
def test_eq():
assert_series_equal(compute(t['amount'] == 100, df),
df['amount'] == 100)
def test_selection():
tm.assert_frame_equal(compute(t[t['amount'] == 0], df),
df[df['amount'] == 0])
tm.assert_frame_equal(compute(t[t['amount'] > 150], df),
df[df['amount'] > 150])
def test_arithmetic():
assert_series_equal(compute(t['amount'] + t['id'], df),
df.amount + df.id)
assert_series_equal(compute(t['amount'] * t['id'], df),
df.amount * df.id)
assert_series_equal(compute(t['amount'] % t['id'], df),
df.amount % df.id)
def test_join():
left = DataFrame(
[['Alice', 100], ['Bob', 200]], columns=['name', 'amount'])
right = DataFrame([['Alice', 1], ['Bob', 2]], columns=['name', 'id'])
lsym = symbol('L', 'var * {name: string, amount: int}')
rsym = symbol('R', 'var * {name: string, id: int}')
joined = join(lsym, rsym, 'name')
assert (dshape(joined.schema) ==
dshape('{name: string, amount: int, id: int}'))
result = compute(joined, {lsym: left, rsym: right})
expected = DataFrame([['Alice', 100, 1], ['Bob', 200, 2]],
columns=['name', 'amount', 'id'])
tm.assert_frame_equal(result, expected)
assert list(result.columns) == list(joined.fields)
def test_multi_column_join():
left = [(1, 2, 3),
(2, 3, 4),
(1, 3, 5)]
left = DataFrame(left, columns=['x', 'y', 'z'])
right = [(1, 2, 30),
(1, 3, 50),
(1, 3, 150)]
right = DataFrame(right, columns=['x', 'y', 'w'])
lsym = symbol('lsym', 'var * {x: int, y: int, z: int}')
rsym = symbol('rsym', 'var * {x: int, y: int, w: int}')
j = join(lsym, rsym, ['x', 'y'])
expected = [(1, 2, 3, 30),
(1, 3, 5, 50),
(1, 3, 5, 150)]
expected = DataFrame(expected, columns=['x', 'y', 'z', 'w'])
result = compute(j, {lsym: left, rsym: right})
print(result)
tm.assert_frame_equal(result, expected)
assert list(result.columns) == list(j.fields)
def test_unary_op():
assert (compute(exp(t['amount']), df) == np.exp(df['amount'])).all()
def test_abs():
assert (compute(abs(t['amount']), df) == abs(df['amount'])).all()
def test_neg():
assert_series_equal(compute(-t['amount'], df),
-df['amount'])
@xfail(reason='Projection does not support arithmetic')
def test_neg_projection():
assert_series_equal(compute(-t[['amount', 'id']], df),
-df[['amount', 'id']])
def test_columns_series():
assert isinstance(compute(t['amount'], df), Series)
assert isinstance(compute(t['amount'] > 150, df), Series)
def test_reductions():
assert compute(mean(t['amount']), df) == 350 / 3
assert compute(count(t['amount']), df) == 3
assert compute(sum(t['amount']), df) == 100 + 200 + 50
assert compute(min(t['amount']), df) == 50
assert compute(max(t['amount']), df) == 200
assert compute(nunique(t['amount']), df) == 3
assert compute(nunique(t['name']), df) == 2
assert compute(any(t['amount'] > 150), df) is True
assert compute(any(t['amount'] > 250), df) is False
assert compute(var(t['amount']), df) == df.amount.var(ddof=0)
assert compute(var(t['amount'], unbiased=True), df) == df.amount.var()
assert compute(std(t['amount']), df) == df.amount.std(ddof=0)
assert compute(std(t['amount'], unbiased=True), df) == df.amount.std()
assert compute(t.amount[0], df) == df.amount.iloc[0]
assert compute(t.amount[-1], df) == df.amount.iloc[-1]
def test_reductions_on_dataframes():
assert compute(count(t), df) == 3
assert shape(compute(count(t, keepdims=True), df)) == (1,)
def test_1d_reductions_keepdims():
series = df['amount']
for r in [sum, min, max, nunique, count, std, var]:
result = compute(r(t.amount, keepdims=True), {t.amount: series})
assert type(result) == type(series)
def test_distinct():
dftoobig = DataFrame([['Alice', 'F', 100, 1],
['Alice', 'F', 100, 1],
['Alice', 'F', 100, 3],
['Drew', 'F', 100, 4],
['Drew', 'M', 100, 5],
['Drew', 'F', 100, 4],
['Drew', 'M', 100, 5],
['Drew', 'M', 200, 5],
['Drew', 'M', 200, 5]],
columns=['name', 'sex', 'amount', 'id'])
d_t = distinct(tbig)
d_df = compute(d_t, dftoobig)
tm.assert_frame_equal(d_df, dfbig)
# Test idempotence
tm.assert_frame_equal(compute(d_t, d_df), d_df)
def test_distinct_on():
cols = ['name', 'sex', 'amount', 'id']
df = DataFrame([['Alice', 'F', 100, 1],
['Alice', 'F', 100, 3],
['Drew', 'F', 100, 4],
['Drew', 'M', 100, 5],
['Drew', 'F', 100, 4],
['Drew', 'M', 100, 5],
['Drew', 'M', 200, 5]],
columns=cols)
s = symbol('s', discover(df))
computed = compute(s.distinct('sex'), df)
tm.assert_frame_equal(
computed,
pd.DataFrame([['Alice', 'F', 100, 1],
['Drew', 'M', 100, 5]],
columns=cols),
)
def test_by_one():
result = compute(by(t['name'], total=t['amount'].sum()), df)
expected = df.groupby('name')['amount'].sum().reset_index()
expected.columns = ['name', 'total']
tm.assert_frame_equal(result, expected)
def test_by_two():
result = compute(by(tbig[['name', 'sex']],
total=sum(tbig['amount'])), dfbig)
expected = DataFrame([['Alice', 'F', 200],
['Drew', 'F', 100],
['Drew', 'M', 300]],
columns=['name', 'sex', 'total'])
tm.assert_frame_equal(result, expected)
def test_by_three():
expr = by(tbig[['name', 'sex']],
total=(tbig['id'] + tbig['amount']).sum())
result = compute(expr, dfbig)
expected = DataFrame([['Alice', 'F', 204],
['Drew', 'F', 104],
['Drew', 'M', 310]], columns=['name', 'sex', 'total'])
expected.columns = expr.fields
tm.assert_frame_equal(result, expected)
def test_by_four():
t = tbig[['sex', 'amount']]
expr = by(t['sex'], max=t['amount'].max())
result = compute(expr, dfbig)
expected = DataFrame([['F', 100],
['M', 200]], columns=['sex', 'max'])
tm.assert_frame_equal(result, expected)
def test_join_by_arcs():
df_idx = DataFrame([['A', 1],
['B', 2],
['C', 3]],
columns=['name', 'node_id'])
df_arc = DataFrame([[1, 3],
[2, 3],
[3, 1]],
columns=['node_out', 'node_id'])
t_idx = symbol('t_idx', 'var * {name: string, node_id: int32}')
t_arc = symbol('t_arc', 'var * {node_out: int32, node_id: int32}')
joined = join(t_arc, t_idx, "node_id")
want = by(joined['name'], count=joined['node_id'].count())
result = compute(want, {t_arc: df_arc, t_idx: df_idx})
result_pandas = pd.merge(df_arc, df_idx, on='node_id')
gb = result_pandas.groupby('name')
expected = gb.node_id.count().reset_index().rename(columns={
'node_id': 'count'
})
tm.assert_frame_equal(result, expected)
assert list(result.columns) == ['name', 'count']
def test_join_suffixes():
df = pd.DataFrame(
list(dict((k, n) for k in ascii_lowercase[:5]) for n in range(5)),
)
a = symbol('a', discover(df))
b = symbol('b', discover(df))
suffixes = '_x', '_y'
joined = join(a, b, 'a', suffixes=suffixes)
expected = pd.merge(df, df, on='a', suffixes=suffixes)
result = compute(joined, {a: df, b: df})
tm.assert_frame_equal(result, expected)
def test_join_promotion():
a_data = pd.DataFrame([[0.0, 1.5], [1.0, 2.5]], columns=list('ab'))
b_data = pd.DataFrame([[0, 1], [1, 2]], columns=list('ac'))
a = symbol('a', discover(a_data))
b = symbol('b', discover(b_data))
joined = join(a, b, 'a')
assert joined.dshape == dshape('var * {a: float64, b: ?float64, c: int64}')
expected = pd.merge(a_data, b_data, on='a')
result = compute(joined, {a: a_data, b: b_data})
tm.assert_frame_equal(result, expected)
def test_sort():
tm.assert_frame_equal(compute(t.sort('amount'), df),
df.sort('amount'))
tm.assert_frame_equal(compute(t.sort('amount', ascending=True), df),
df.sort('amount', ascending=True))
tm.assert_frame_equal(compute(t.sort(['amount', 'id']), df),
df.sort(['amount', 'id']))
def test_sort_on_series_no_warning(recwarn):
expected = df.amount.order()
recwarn.clear()
assert_series_equal(compute(t['amount'].sort('amount'), df), expected)
# raises an AssertionError if no warning occurred; same for the check below
with pytest.raises(AssertionError):
assert recwarn.pop(FutureWarning)
assert_series_equal(compute(t['amount'].sort(), df), expected)
with pytest.raises(AssertionError):
assert recwarn.pop(FutureWarning)
def test_field_on_series():
expr = symbol('s', 'var * int')
data = Series([1, 2, 3, 4], name='s')
assert_series_equal(compute(expr.s, data), data)
def test_head():
tm.assert_frame_equal(compute(t.head(1), df), df.head(1))
def test_tail():
tm.assert_frame_equal(compute(t.tail(1), df), df.tail(1))
def test_label():
expected = df['amount'] * 10
expected.name = 'foo'
assert_series_equal(compute((t['amount'] * 10).label('foo'), df),
expected)
def test_relabel():
result = compute(t.relabel({'name': 'NAME', 'id': 'ID'}), df)
expected = df.rename(columns={'name': 'NAME', 'id': 'ID'})
tm.assert_frame_equal(result, expected)
def test_relabel_series():
result = compute(t.relabel({'name': 'NAME'}), df.name)
assert result.name == 'NAME'
ts = pd.date_range('now', periods=10).to_series().reset_index(drop=True)
tframe = DataFrame({'timestamp': ts})
def test_map_column():
inc = lambda x: x + 1
result = compute(t['amount'].map(inc, 'int'), df)
expected = df['amount'] + 1
assert_series_equal(result, expected)
def test_map():
f = lambda _, amt, id: amt + id
result = compute(t.map(f, 'real'), df)
expected = df['amount'] + df['id']
assert_series_equal(result, expected)
def test_apply_column():
result = compute(t.amount.apply(np.sum, 'real'), df)
expected = np.sum(df['amount'])
assert result == expected
result = compute(t.amount.apply(builtins.sum, 'real'), df)
expected = builtins.sum(df['amount'])
assert result == expected
def test_apply():
result = compute(t.apply(str, 'string'), df)
expected = str(df)
assert result == expected
def test_merge():
col = (t['amount'] * 2).label('new')
expr = merge(t['name'], col)
expected = DataFrame([['Alice', 200],
['Bob', 400],
['Alice', 100]],
columns=['name', 'new'])
result = compute(expr, df)
tm.assert_frame_equal(result, expected)
def test_by_nunique():
result = compute(by(t['name'], count=t['id'].nunique()), df)
expected = DataFrame([['Alice', 2], ['Bob', 1]],
columns=['name', 'count'])
tm.assert_frame_equal(result, expected)
def test_selection_out_of_order():
expr = t['name'][t['amount'] < 100]
expected = df.loc[df.amount < 100, 'name']
result = compute(expr, df)
assert_series_equal(result, expected)
def test_outer_join():
left = [(1, 'Alice', 100),
(2, 'Bob', 200),
(4, 'Dennis', 400)]
left = DataFrame(left, columns=['id', 'name', 'amount'])
right = [('NYC', 1),
('Boston', 1),
('LA', 3),
('Moscow', 4)]
right = DataFrame(right, columns=['city', 'id'])
lsym = symbol('lsym', 'var * {id: int, name: string, amount: real}')
rsym = symbol('rsym', 'var * {city: string, id: int}')
convert = lambda df: set(df.to_records(index=False).tolist())
assert (convert(compute(join(lsym, rsym), {lsym: left, rsym: right})) ==
set([(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(4, 'Dennis', 400, 'Moscow')]))
assert (convert(compute(join(lsym, rsym, how='left'),
{lsym: left, rsym: right})) ==
set([(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(2, 'Bob', 200, np.nan),
(4, 'Dennis', 400, 'Moscow')]))
df = compute(join(lsym, rsym, how='right'), {lsym: left, rsym: right})
expected = DataFrame([(1., 'Alice', 100., 'NYC'),
(1., 'Alice', 100., 'Boston'),
(3., np.nan, np.nan, 'LA'),
(4., 'Dennis', 400., 'Moscow')],
columns=['id', 'name', 'amount', 'city'])
result = df.sort('id').to_records(index=False)
expected = expected.sort('id').to_records(index=False)
np.array_equal(result, expected)
df = compute(join(lsym, rsym, how='outer'), {lsym: left, rsym: right})
expected = DataFrame([(1., 'Alice', 100., 'NYC'),
(1., 'Alice', 100., 'Boston'),
(2., 'Bob', 200., np.nan),
(3., np.nan, np.nan, 'LA'),
(4., 'Dennis', 400., 'Moscow')],
columns=['id', 'name', 'amount', 'city'])
result = df.sort('id').to_records(index=False)
expected = expected.sort('id').to_records(index=False)
np.array_equal(result, expected)
def test_by_on_same_column():
df = pd.DataFrame([[1, 2], [1, 4], [2, 9]], columns=['id', 'value'])
t = symbol('data', 'var * {id: int, value: int}')
gby = by(t['id'], count=t['id'].count())
expected = DataFrame([[1, 2], [2, 1]], columns=['id', 'count'])
result = compute(gby, {t: df})
tm.assert_frame_equal(result, expected)
def test_summary_by():
expr = by(t.name, summary(count=t.id.count(), sum=t.amount.sum()))
result = compute(expr, df)
expected = DataFrame([['Alice', 2, 150],
['Bob', 1, 200]], columns=['name', 'count', 'sum'])
expr = by(t.name, summary(count=t.id.count(), sum=(t.amount + 1).sum()))
result = compute(expr, df)
expected = DataFrame([['Alice', 2, 152],
['Bob', 1, 201]], columns=['name', 'count', 'sum'])
tm.assert_frame_equal(result, expected)
@pytest.mark.xfail(raises=TypeError,
reason=('pandas backend cannot support non Reduction '
'subclasses'))
def test_summary_by_first():
expr = by(t.name, fst=t.amount[0])
result = compute(expr, df)
assert result == df.amount.iloc[0]
def test_summary_by_reduction_arithmetic():
expr = by(t.name, summary(count=t.id.count(), sum=t.amount.sum() + 1))
result = compute(expr, df)
expected = DataFrame([['Alice', 2, 151],
['Bob', 1, 201]], columns=['name', 'count', 'sum'])
tm.assert_frame_equal(result, expected)
def test_summary():
expr = summary(count=t.id.count(), sum=t.amount.sum())
assert_series_equal(compute(expr, df), Series({'count': 3, 'sum': 350}))
def test_summary_on_series():
ser = Series([1, 2, 3])
s = symbol('s', '3 * int')
expr = summary(max=s.max(), min=s.min())
assert compute(expr, ser) == (3, 1)
expr = summary(max=s.max(), min=s.min(), keepdims=True)
assert compute(expr, ser) == [(3, 1)]
def test_summary_keepdims():
expr = summary(count=t.id.count(), sum=t.amount.sum(), keepdims=True)
expected = DataFrame([[3, 350]], columns=['count', 'sum'])
tm.assert_frame_equal(compute(expr, df), expected)
def test_dplyr_transform():
df = DataFrame({'timestamp': pd.date_range('now', periods=5)})
t = symbol('t', discover(df))
expr = transform(t, date=t.timestamp.map(lambda x: x.date(),
schema='datetime'))
lhs = compute(expr, df)
rhs = pd.concat([df, Series(df.timestamp.map(lambda x: x.date()),
name='date').to_frame()], axis=1)
tm.assert_frame_equal(lhs, rhs)
def test_nested_transform():
d = {'timestamp': [1379613528, 1379620047], 'platform': ["Linux",
"Windows"]}
df = DataFrame(d)
t = symbol('t', discover(df))
t = transform(t, timestamp=t.timestamp.map(datetime.fromtimestamp,
schema='datetime'))
expr = transform(t, date=t.timestamp.map(lambda x: x.date(),
schema='datetime'))
result = compute(expr, df)
df['timestamp'] = df.timestamp.map(datetime.fromtimestamp)
df['date'] = df.timestamp.map(lambda x: x.date())
tm.assert_frame_equal(result, df)
def test_like():
expr = t.like(name='Alice*')
expected = DataFrame([['Alice', 100, 1],
['Alice', 50, 3]],
columns=['name', 'amount', 'id'])
result = compute(expr, df).reset_index(drop=True)
tm.assert_frame_equal(result, expected)
def test_strlen():
expr = t.name.strlen()
expected = pd.Series([5, 3, 5], name='name')
result = compute(expr, df).reset_index(drop=True)
assert_series_equal(expected, result)
def test_rowwise_by():
f = lambda _, id, name: id + len(name)
expr = by(t.map(f, 'int'), total=t.amount.sum())
df = pd.DataFrame({'id': [1, 1, 2],
'name': ['alice', 'wendy', 'bob'],
'amount': [100, 200, 300.03]})
expected = pd.DataFrame([(5, 300.03), (6, 300)], columns=expr.fields)
result = compute(expr, df)
tm.assert_frame_equal(result, expected)
def test_datetime_access():
df = DataFrame({'name': ['Alice', 'Bob', 'Joe'],
'when': [datetime(2010, 1, 1, 1, 1, 1)] * 3,
'amount': [100, 200, 300],
'id': [1, 2, 3]})
t = symbol('t', discover(df))
for attr in ['day', 'month', 'minute', 'second']:
expr = getattr(t.when, attr)
assert_series_equal(compute(expr, df),
Series([1, 1, 1], name=expr._name))
def test_frame_slice():
assert_series_equal(compute(t[0], df), df.iloc[0])
assert_series_equal(compute(t[2], df), df.iloc[2])
tm.assert_frame_equal(compute(t[:2], df), df.iloc[:2])
tm.assert_frame_equal(compute(t[1:3], df), df.iloc[1:3])
tm.assert_frame_equal(compute(t[1::2], df), df.iloc[1::2])
tm.assert_frame_equal(compute(t[[2, 0]], df), df.iloc[[2, 0]])
def test_series_slice():
assert compute(t.amount[0], df) == df.amount.iloc[0]
assert compute(t.amount[2], df) == df.amount.iloc[2]
assert_series_equal(compute(t.amount[:2], df), df.amount.iloc[:2])
assert_series_equal(compute(t.amount[1:3], df), df.amount.iloc[1:3])
assert_series_equal(compute(t.amount[1::2], df), df.amount.iloc[1::2])
def test_nelements():
assert compute(t.nelements(), df) == len(df)
assert compute(t.nrows, df) == len(df)
def test_datetime_truncation_minutes():
data = Series(['2000-01-01T12:10:00Z', '2000-06-25T12:35:12Z'],
dtype='M8[ns]')
s = symbol('s', 'var * datetime')
result = compute(s.truncate(20, 'minutes'), data)
expected = Series(['2000-01-01T12:00:00Z', '2000-06-25T12:20:00Z'],
dtype='M8[ns]', name='s')
assert_series_equal(result, expected)
def test_datetime_truncation_nanoseconds():
data = Series(['2000-01-01T12:10:00.000000005',
'2000-01-01T12:10:00.000000025'],
dtype='M8[ns]')
s = symbol('s', 'var * datetime')
expected = Series(['2000-01-01T12:10:00.000000000',
'2000-01-01T12:10:00.000000020'],
dtype='M8[ns]', name='s')
result = compute(s.truncate(nanoseconds=20), data)
assert_series_equal(result, expected)
def test_datetime_truncation_weeks():
data = Series(['2000-01-01T12:10:00Z', '2000-06-25T12:35:12Z'],
dtype='M8[ns]')
s = symbol('s', 'var * datetime')
result = compute(s.truncate(2, 'weeks'), data)
expected = Series(['1999-12-19', '2000-06-18'], dtype='M8[ns]', name='s')
assert_series_equal(result, expected)
def test_datetime_truncation_days():
data = Series(['2000-01-01T12:10:00Z', '2000-06-25T12:35:12Z'],
dtype='M8[ns]')
s = symbol('s', 'var * datetime')
result = compute(s.truncate(days=3), data)
expected = Series(['1999-12-31', '2000-06-25'], dtype='M8[ns]', name='s')
import pandas as pd
import numpy as np
import os
from sklearn.linear_model import Ridge
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
mos_data=pd.read_csv('data.csv')[['streaming_log','mos']]
data=[]
for l in os.listdir('./streaming_logs/'):
d_=pd.read_csv('./streaming_logs/'+l)
vmaf_change=0
smooth_count=0
rebuffer_count=0
for i in range(len(d_)):
if i>0:
if d_['rebuffering_duration'].iloc[i]>0:
rebuffer_count+=1
vmaf_change=vmaf_change+np.abs(d_['vmaf'].iloc[i]-d_['vmaf'].iloc[i-1])
smooth_count=smooth_count+(np.abs(d_['vmaf'].iloc[i]-d_['vmaf'].iloc[i-1]))//20
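# integer-dividing each chunk-to-chunk VMAF change by 20 counts how many 20-point "steps"
# of quality switching occurred; this feeds the total_smooth_count feature appended below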
# if vmaf_change > 20:
# smooth_count+=1
data.append([l,d_['vmaf'].sum(), d_['rebuffering_duration'].sum(),rebuffer_count, vmaf_change, smooth_count])
agg_data=pd.DataFrame(data, columns=['streaming_log','total_vmaf','total_rebuffer','total_rebuffer_count','total_smooth_change','total_smooth_count'])
merged_data = pd.merge(mos_data, agg_data, on='streaming_log')
from __future__ import absolute_import, division, print_function
import json
import logging
import math
import os
import random
import warnings
from dataclasses import asdict
from multiprocessing import cpu_count
import numpy as np
import pandas as pd
import torch
from scipy.stats import pearsonr
from sklearn.metrics import (
confusion_matrix,
label_ranking_average_precision_score,
matthews_corrcoef,
mean_squared_error,
)
from tensorboardX import SummaryWriter
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from tqdm.auto import tqdm, trange
from transformers import (
WEIGHTS_NAME,
AdamW,
AlbertConfig,
AlbertForQuestionAnswering,
AlbertTokenizer,
AutoConfig,
AutoModelForQuestionAnswering,
AutoTokenizer,
BartConfig,
BartForQuestionAnswering,
BartTokenizer,
BertConfig,
BertForQuestionAnswering,
BertTokenizer,
DistilBertConfig,
DistilBertForQuestionAnswering,
DistilBertTokenizer,
ElectraConfig,
ElectraTokenizer,
LongformerConfig,
LongformerForQuestionAnswering,
LongformerTokenizer,
MobileBertConfig,
MobileBertForQuestionAnswering,
MobileBertTokenizer,
RobertaConfig,
RobertaForQuestionAnswering,
RobertaTokenizer,
XLMConfig,
XLMForQuestionAnswering,
XLMRobertaConfig,
XLMRobertaTokenizer,
XLMTokenizer,
XLNetConfig,
XLNetForQuestionAnswering,
XLNetTokenizer,
get_linear_schedule_with_warmup,
)
from simpletransformers.config.global_args import global_args
from simpletransformers.config.model_args import QuestionAnsweringArgs
from simpletransformers.config.utils import sweep_config_to_sweep_values
from simpletransformers.custom_models.models import ElectraForQuestionAnswering, XLMRobertaForQuestionAnswering
from simpletransformers.question_answering.question_answering_utils import (
LazyQuestionAnsweringDataset,
RawResult,
RawResultExtended,
build_examples,
convert_examples_to_features,
get_best_predictions,
get_best_predictions_extended,
get_examples,
squad_convert_examples_to_features,
to_list,
write_predictions,
write_predictions_extended,
)
try:
import wandb
wandb_available = True
except ImportError:
wandb_available = False
logger = logging.getLogger(__name__)
class QuestionAnsweringModel:
def __init__(self, model_type, model_name, args=None, use_cuda=True, cuda_device=-1, **kwargs):
"""
Initializes a QuestionAnsweringModel model.
Args:
model_type: The type of model (bert, xlnet, xlm, distilbert)
model_name: Default Transformer model name or path to a directory containing Transformer model file (pytorch_model.bin).
args (optional): Default args will be used if this parameter is not provided. If provided,
it should be a dict containing the args that should be changed in the default args.
use_cuda (optional): Use GPU if available. Setting to False will force model to use CPU only.
cuda_device (optional): Specific GPU that should be used. Will use the first available GPU by default.
""" # noqa: ignore flake8"
MODEL_CLASSES = {
"albert": (AlbertConfig, AlbertForQuestionAnswering, AlbertTokenizer),
"auto": (AutoConfig, AutoTokenizer, AutoModelForQuestionAnswering),
"bart": (BartConfig, BartForQuestionAnswering, BartTokenizer),
"bert": (BertConfig, BertForQuestionAnswering, BertTokenizer),
"distilbert": (DistilBertConfig, DistilBertForQuestionAnswering, DistilBertTokenizer),
"electra": (ElectraConfig, ElectraForQuestionAnswering, ElectraTokenizer),
"longformer": (LongformerConfig, LongformerForQuestionAnswering, LongformerTokenizer),
"mobilebert": (MobileBertConfig, MobileBertForQuestionAnswering, MobileBertTokenizer),
"roberta": (RobertaConfig, RobertaForQuestionAnswering, RobertaTokenizer),
"xlm": (XLMConfig, XLMForQuestionAnswering, XLMTokenizer),
"xlmroberta": (XLMRobertaConfig, XLMRobertaForQuestionAnswering, XLMRobertaTokenizer),
"xlnet": (XLNetConfig, XLNetForQuestionAnswering, XLNetTokenizer),
}
self.args = self._load_model_args(model_name)
if isinstance(args, dict):
self.args.update_from_dict(args)
elif isinstance(args, QuestionAnsweringArgs):
self.args = args
if "sweep_config" in kwargs:
self.is_sweeping = True
sweep_config = kwargs.pop("sweep_config")
sweep_values = sweep_config_to_sweep_values(sweep_config)
self.args.update_from_dict(sweep_values)
else:
self.is_sweeping = False
if self.args.manual_seed:
random.seed(self.args.manual_seed)
np.random.seed(self.args.manual_seed)
torch.manual_seed(self.args.manual_seed)
if self.args.n_gpu > 0:
torch.cuda.manual_seed_all(self.args.manual_seed)
if not use_cuda:
self.args.fp16 = False
config_class, model_class, tokenizer_class = MODEL_CLASSES[model_type]
self.config = config_class.from_pretrained(model_name, **self.args.config)
if not self.args.quantized_model:
self.model = model_class.from_pretrained(model_name, config=self.config, **kwargs)
else:
quantized_weights = torch.load(os.path.join(model_name, "pytorch_model.bin"))
self.model = model_class.from_pretrained(None, config=self.config, state_dict=quantized_weights)
if self.args.dynamic_quantize:
self.model = torch.quantization.quantize_dynamic(self.model, {torch.nn.Linear}, dtype=torch.qint8)
if self.args.quantized_model:
self.model.load_state_dict(quantized_weights)
if self.args.dynamic_quantize:
self.args.quantized_model = True
if use_cuda:
if torch.cuda.is_available():
if cuda_device == -1:
self.device = torch.device("cuda")
else:
self.device = torch.device(f"cuda:{cuda_device}")
else:
raise ValueError(
"'use_cuda' set to True when cuda is unavailable."
" Make sure CUDA is available or set use_cuda=False."
)
else:
self.device = "cpu"
self.results = {}
if self.args.fp16:
try:
from torch.cuda import amp
except AttributeError:
raise AttributeError("fp16 requires Pytorch >= 1.6. Please update Pytorch or turn off fp16.")
self.tokenizer = tokenizer_class.from_pretrained(model_name, do_lower_case=self.args.do_lower_case, **kwargs)
self.args.model_name = model_name
self.args.model_type = model_type
if self.args.wandb_project and not wandb_available:
warnings.warn("wandb_project specified but wandb is not available. Wandb disabled.")
self.args.wandb_project = None
def load_and_cache_examples(self, examples, evaluate=False, no_cache=False, output_examples=False):
"""
Converts a list of examples to a TensorDataset containing InputFeatures. Caches the InputFeatures.
Utility function for train() and eval() methods. Not intended to be used directly.
"""
tokenizer = self.tokenizer
args = self.args
if not no_cache:
no_cache = args.no_cache
if not no_cache:
os.makedirs(self.args.cache_dir, exist_ok=True)
examples = get_examples(examples, is_training=not evaluate)
mode = "dev" if evaluate else "train"
cached_features_file = os.path.join(
args.cache_dir, "cached_{}_{}_{}_{}".format(mode, args.model_type, args.max_seq_length, len(examples)),
)
if os.path.exists(cached_features_file) and (
(not args.reprocess_input_data and not no_cache) or (mode == "dev" and args.use_cached_eval_features)
):
features = torch.load(cached_features_file)
logger.info(f" Features loaded from cache at {cached_features_file}")
else:
logger.info(" Converting to features started.")
features, dataset = squad_convert_examples_to_features(
examples=examples,
tokenizer=tokenizer,
max_seq_length=args.max_seq_length,
doc_stride=args.doc_stride,
max_query_length=args.max_query_length,
is_training=not evaluate,
tqdm_enabled=not args.silent,
threads=args.process_count,
args=args,
)
# if not no_cache:
# torch.save(features, cached_features_file)
# all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
# all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
# all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
# all_cls_index = torch.tensor([f.cls_index for f in features], dtype=torch.long)
# all_p_mask = torch.tensor([f.p_mask for f in features], dtype=torch.float)
# all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
# if evaluate:
# dataset = TensorDataset(
# all_input_ids, all_input_mask, all_segment_ids, all_example_index, all_cls_index, all_p_mask,
# )
# else:
# all_start_positions = torch.tensor([f.start_position for f in features], dtype=torch.long)
# all_end_positions = torch.tensor([f.end_position for f in features], dtype=torch.long)
# dataset = TensorDataset(
# all_input_ids,
# all_input_mask,
# all_segment_ids,
# all_start_positions,
# all_end_positions,
# all_cls_index,
# all_p_mask,
# )
if output_examples:
return dataset, examples, features
return dataset
def train_model(
self, train_data, output_dir=False, show_running_loss=True, args=None, eval_data=None, verbose=True, **kwargs
):
"""
Trains the model using 'train_data'
Args:
train_data: Path to JSON file containing training data OR list of Python dicts in the correct format. The model will be trained on this data.
output_dir: The directory where model files will be saved. If not given, self.args.output_dir will be used.
show_running_loss (optional): Set to False to prevent running loss from being printed to console. Defaults to True.
args (optional): Optional changes to the args dict of the model. Any changes made will persist for the model.
eval_data (optional): Path to JSON file containing evaluation data against which evaluation will be performed when evaluate_during_training is enabled.
Is required if evaluate_during_training is enabled.
**kwargs: Additional metrics that should be used. Pass in the metrics as keyword arguments (name of metric: function to use).
A metric function should take in two parameters. The first parameter will be the true labels, and the second parameter will be the predictions.
Returns:
global_step: Number of global steps trained
training_details: Average training loss if evaluate_during_training is False or full training progress scores if evaluate_during_training is True
""" # noqa: ignore flake8"
if args:
self.args.update_from_dict(args)
if self.args.silent:
show_running_loss = False
if self.args.evaluate_during_training and eval_data is None:
raise ValueError(
"evaluate_during_training is enabled but eval_data is not specified."
" Pass eval_data to model.train_model() if using evaluate_during_training."
)
if not output_dir:
output_dir = self.args.output_dir
if os.path.exists(output_dir) and os.listdir(output_dir) and not self.args.overwrite_output_dir:
raise ValueError(
"Output directory ({}) already exists and is not empty."
"Use --overwrite_output_dir to overcome.".format(output_dir)
)
self._move_model_to_device()
if self.args.lazy_loading:
if isinstance(train_data, str):
train_dataset = LazyQuestionAnsweringDataset(train_data, self.tokenizer, self.args)
else:
raise ValueError("Input must be given as a path to a file when using lazy loading")
else:
if isinstance(train_data, str):
with open(train_data, "r", encoding=self.args.encoding) as f:
train_examples = json.load(f)
else:
train_examples = train_data
train_dataset = self.load_and_cache_examples(train_examples)
os.makedirs(output_dir, exist_ok=True)
global_step, training_details = self.train(
train_dataset, output_dir, show_running_loss=show_running_loss, eval_data=eval_data, **kwargs
)
self.save_model(model=self.model)
logger.info(" Training of {} model complete. Saved to {}.".format(self.args.model_type, output_dir))
return global_step, training_details
def train(self, train_dataset, output_dir, show_running_loss=True, eval_data=None, verbose=True, **kwargs):
"""
Trains the model on train_dataset.
Utility function to be used by the train_model() method. Not intended to be used directly.
"""
device = self.device
model = self.model
args = self.args
tb_writer = SummaryWriter(logdir=args.tensorboard_dir)
train_sampler = RandomSampler(train_dataset)
train_dataloader = DataLoader(
train_dataset,
sampler=train_sampler,
batch_size=args.train_batch_size,
num_workers=self.args.dataloader_num_workers,
)
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
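# e.g. with 1000 batches per epoch, gradient_accumulation_steps=2 and num_train_epochs=3:
#   t_total = 1000 // 2 * 3 = 1500 optimizer steps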
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = []
custom_parameter_names = set()
for group in self.args.custom_parameter_groups:
params = group.pop("params")
custom_parameter_names.update(params)
param_group = {**group}
param_group["params"] = [p for n, p in model.named_parameters() if n in params]
optimizer_grouped_parameters.append(param_group)
for group in self.args.custom_layer_parameters:
layer_number = group.pop("layer")
layer = f"layer.{layer_number}."
group_d = {**group}
group_nd = {**group}
group_nd["weight_decay"] = 0.0
params_d = []
params_nd = []
for n, p in model.named_parameters():
if n not in custom_parameter_names and layer in n:
if any(nd in n for nd in no_decay):
params_nd.append(p)
else:
params_d.append(p)
custom_parameter_names.add(n)
group_d["params"] = params_d
group_nd["params"] = params_nd
optimizer_grouped_parameters.append(group_d)
optimizer_grouped_parameters.append(group_nd)
if not self.args.train_custom_parameters_only:
optimizer_grouped_parameters.extend(
[
{
"params": [
p
for n, p in model.named_parameters()
if n not in custom_parameter_names and not any(nd in n for nd in no_decay)
],
"weight_decay": args.weight_decay,
},
{
"params": [
p
for n, p in model.named_parameters()
if n not in custom_parameter_names and any(nd in n for nd in no_decay)
],
"weight_decay": 0.0,
},
]
)
warmup_steps = math.ceil(t_total * args.warmup_ratio)
args.warmup_steps = warmup_steps if args.warmup_steps == 0 else args.warmup_steps
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon,)
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
global_step = 0
training_progress_scores = None
tr_loss, logging_loss = 0.0, 0.0
model.zero_grad()
train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.silent, mininterval=0)
epoch_number = 0
best_eval_metric = None
early_stopping_counter = 0
steps_trained_in_current_epoch = 0
epochs_trained = 0
if args.model_name and os.path.exists(args.model_name):
try:
# set global_step to global_step of last saved checkpoint from model path
checkpoint_suffix = args.model_name.split("/")[-1].split("-")
if len(checkpoint_suffix) > 2:
checkpoint_suffix = checkpoint_suffix[1]
else:
checkpoint_suffix = checkpoint_suffix[-1]
global_step = int(checkpoint_suffix)
epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)
steps_trained_in_current_epoch = global_step % (
len(train_dataloader) // args.gradient_accumulation_steps
)
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(" Continuing training from epoch %d", epochs_trained)
logger.info(" Continuing training from global step %d", global_step)
logger.info(" Will skip the first %d steps in the current epoch", steps_trained_in_current_epoch)
except ValueError:
logger.info(" Starting fine-tuning.")
if args.evaluate_during_training:
training_progress_scores = self._create_training_progress_scores(**kwargs)
if args.wandb_project:
wandb.init(project=args.wandb_project, config={**asdict(args)}, **args.wandb_kwargs)
wandb.watch(self.model)
if args.fp16:
from torch.cuda import amp
scaler = amp.GradScaler()
for _ in train_iterator:
model.train()
if epochs_trained > 0:
epochs_trained -= 1
continue
train_iterator.set_description(f"Epoch {epoch_number + 1} of {args.num_train_epochs}")
batch_iterator = tqdm(
train_dataloader,
desc=f"Running Epoch {epoch_number} of {args.num_train_epochs}",
disable=args.silent,
mininterval=0,
)
for step, batch in enumerate(batch_iterator):
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
batch = tuple(t.to(device) for t in batch)
inputs = self._get_inputs_dict(batch)
if args.fp16:
with amp.autocast():
outputs = model(**inputs)
# model outputs are always tuple in pytorch-transformers (see doc)
loss = outputs[0]
else:
outputs = model(**inputs)
# model outputs are always tuple in pytorch-transformers (see doc)
loss = outputs[0]
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
current_loss = loss.item()
if show_running_loss:
batch_iterator.set_description(
f"Epochs {epoch_number}/{args.num_train_epochs}. Running Loss: {current_loss:9.4f}"
)
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
scaler.scale(loss).backward()
else:
loss.backward()
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
if args.fp16:
scaler.step(optimizer)
scaler.update()
else:
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if args.logging_steps > 0 and global_step % args.logging_steps == 0:
# Log metrics
tb_writer.add_scalar("lr", scheduler.get_last_lr()[0], global_step)
tb_writer.add_scalar(
"loss", (tr_loss - logging_loss) / args.logging_steps, global_step,
)
logging_loss = tr_loss
if args.wandb_project or self.is_sweeping:
wandb.log(
{
"Training loss": current_loss,
"lr": scheduler.get_last_lr()[0],
"global_step": global_step,
}
)
if args.save_steps > 0 and global_step % args.save_steps == 0:
# Save model checkpoint
output_dir_current = os.path.join(output_dir, "checkpoint-{}".format(global_step))
self.save_model(output_dir_current, optimizer, scheduler, model=model)
if args.evaluate_during_training and (
args.evaluate_during_training_steps > 0
and global_step % args.evaluate_during_training_steps == 0
):
# Only evaluate when single GPU otherwise metrics may not average well
results, _ = self.eval_model(eval_data, verbose=False, **kwargs)
for key, value in results.items():
tb_writer.add_scalar("eval_{}".format(key), value, global_step)
output_dir_current = os.path.join(output_dir, "checkpoint-{}".format(global_step))
if args.save_eval_checkpoints:
self.save_model(output_dir_current, optimizer, scheduler, model=model, results=results)
training_progress_scores["global_step"].append(global_step)
training_progress_scores["train_loss"].append(current_loss)
for key in results:
training_progress_scores[key].append(results[key])
report = pd.DataFrame(training_progress_scores)
report.to_csv(
os.path.join(args.output_dir, "training_progress_scores.csv"), index=False,
)
if args.wandb_project or self.is_sweeping:
wandb.log(self._get_last_metrics(training_progress_scores))
if not best_eval_metric:
best_eval_metric = results[args.early_stopping_metric]
self.save_model(args.best_model_dir, optimizer, scheduler, model=model, results=results)
if best_eval_metric and args.early_stopping_metric_minimize:
if results[args.early_stopping_metric] - best_eval_metric < args.early_stopping_delta:
best_eval_metric = results[args.early_stopping_metric]
self.save_model(
args.best_model_dir, optimizer, scheduler, model=model, results=results
)
early_stopping_counter = 0
else:
if args.use_early_stopping:
if early_stopping_counter < args.early_stopping_patience:
early_stopping_counter += 1
if verbose:
logger.info(f" No improvement in {args.early_stopping_metric}")
logger.info(f" Current step: {early_stopping_counter}")
logger.info(f" Early stopping patience: {args.early_stopping_patience}")
else:
if verbose:
logger.info(f" Patience of {args.early_stopping_patience} steps reached")
logger.info(" Training terminated.")
train_iterator.close()
return (
global_step,
tr_loss / global_step
if not self.args.evaluate_during_training
else training_progress_scores,
)
else:
if results[args.early_stopping_metric] - best_eval_metric > args.early_stopping_delta:
best_eval_metric = results[args.early_stopping_metric]
self.save_model(
args.best_model_dir, optimizer, scheduler, model=model, results=results
)
early_stopping_counter = 0
else:
if args.use_early_stopping:
if early_stopping_counter < args.early_stopping_patience:
early_stopping_counter += 1
if verbose:
logger.info(f" No improvement in {args.early_stopping_metric}")
logger.info(f" Current step: {early_stopping_counter}")
logger.info(f" Early stopping patience: {args.early_stopping_patience}")
else:
if verbose:
logger.info(f" Patience of {args.early_stopping_patience} steps reached")
logger.info(" Training terminated.")
train_iterator.close()
return (
global_step,
tr_loss / global_step
if not self.args.evaluate_during_training
else training_progress_scores,
)
epoch_number += 1
output_dir_current = os.path.join(output_dir, "checkpoint-{}-epoch-{}".format(global_step, epoch_number))
if args.save_model_every_epoch or args.evaluate_during_training:
os.makedirs(output_dir_current, exist_ok=True)
if args.save_model_every_epoch:
self.save_model(output_dir_current, optimizer, scheduler, model=model)
if args.evaluate_during_training and args.evaluate_each_epoch:
results, _ = self.eval_model(eval_data, verbose=False, **kwargs)
self.save_model(output_dir_current, optimizer, scheduler, results=results)
training_progress_scores["global_step"].append(global_step)
training_progress_scores["train_loss"].append(current_loss)
for key in results:
training_progress_scores[key].append(results[key])
report = pd.DataFrame(training_progress_scores)
from datetime import (
datetime,
timedelta,
)
from importlib import reload
import string
import sys
import numpy as np
import pytest
from pandas._libs.tslibs import iNaT
import pandas.util._test_decorators as td
from pandas import (
NA,
Categorical,
CategoricalDtype,
Index,
Interval,
NaT,
Series,
Timedelta,
Timestamp,
cut,
date_range,
)
import pandas._testing as tm
class TestAstypeAPI:
def test_arg_for_errors_in_astype(self):
# see GH#14878
ser = Series([1, 2, 3])
msg = (
r"Expected value of kwarg 'errors' to be one of \['raise', "
r"'ignore'\]\. Supplied value is 'False'"
)
with pytest.raises(ValueError, match=msg):
ser.astype(np.float64, errors=False)
ser.astype(np.int8, errors="raise")
@pytest.mark.parametrize("dtype_class", [dict, Series])
def test_astype_dict_like(self, dtype_class):
# see GH#7271
ser = Series(range(0, 10, 2), name="abc")
dt1 = dtype_class({"abc": str})
result = ser.astype(dt1)
expected = Series(["0", "2", "4", "6", "8"], name="abc")
tm.assert_series_equal(result, expected)
dt2 = dtype_class({"abc": "float64"})
result = ser.astype(dt2)
expected = Series([0.0, 2.0, 4.0, 6.0, 8.0], dtype="float64", name="abc")
tm.assert_series_equal(result, expected)
dt3 = dtype_class({"abc": str, "def": str})
msg = (
"Only the Series name can be used for the key in Series dtype "
r"mappings\."
)
with pytest.raises(KeyError, match=msg):
ser.astype(dt3)
dt4 = dtype_class({0: str})
with pytest.raises(KeyError, match=msg):
ser.astype(dt4)
# GH#16717
# if dtypes provided is empty, it should error
if dtype_class is Series:
dt5 = dtype_class({}, dtype=object)
else:
dt5 = dtype_class({})
with pytest.raises(KeyError, match=msg):
ser.astype(dt5)
class TestAstype:
@pytest.mark.parametrize("dtype", np.typecodes["All"])
def test_astype_empty_constructor_equality(self, dtype):
# see GH#15524
if dtype not in (
"S",
"V", # poor support (if any) currently
"M",
"m", # Generic timestamps raise a ValueError. Already tested.
):
init_empty = Series([], dtype=dtype)
with tm.assert_produces_warning(DeprecationWarning):
as_type_empty = Series([]).astype(dtype)
tm.assert_series_equal(init_empty, as_type_empty)
@pytest.mark.parametrize("dtype", [str, np.str_])
@pytest.mark.parametrize(
"series",
[
Series([string.digits * 10, tm.rands(63), tm.rands(64), tm.rands(1000)]),
Series([string.digits * 10, tm.rands(63), tm.rands(64), np.nan, 1.0]),
],
)
def test_astype_str_map(self, dtype, series):
# see GH#4405
result = series.astype(dtype)
expected = series.map(str)
tm.assert_series_equal(result, expected)
def test_astype_float_to_period(self):
result = Series([np.nan]).astype("period[D]")
expected = Series([NaT], dtype="period[D]")
tm.assert_series_equal(result, expected)
def test_astype_no_pandas_dtype(self):
# https://github.com/pandas-dev/pandas/pull/24866
ser = Series([1, 2], dtype="int64")
# Don't have PandasDtype in the public API, so we use `.array.dtype`,
# which is a PandasDtype.
result = ser.astype(ser.array.dtype)
tm.assert_series_equal(result, ser)
@pytest.mark.parametrize("dtype", [np.datetime64, np.timedelta64])
def test_astype_generic_timestamp_no_frequency(self, dtype, request):
# see GH#15524, GH#15987
data = [1]
s = Series(data)
if np.dtype(dtype).name not in ["timedelta64", "datetime64"]:
mark = pytest.mark.xfail(reason="GH#33890 Is assigned ns unit")
request.node.add_marker(mark)
msg = (
fr"The '{dtype.__name__}' dtype has no unit\. "
fr"Please pass in '{dtype.__name__}\[ns\]' instead."
)
with pytest.raises(ValueError, match=msg):
s.astype(dtype)
def test_astype_dt64_to_str(self):
# GH#10442 : testing astype(str) is correct for Series/DatetimeIndex
dti = date_range("2012-01-01", periods=3)
result = Series(dti).astype(str)
expected = Series(["2012-01-01", "2012-01-02", "2012-01-03"], dtype=object)
tm.assert_series_equal(result, expected)
def test_astype_dt64tz_to_str(self):
# GH#10442 : testing astype(str) is correct for Series/DatetimeIndex
dti_tz = date_range("2012-01-01", periods=3, tz="US/Eastern")
result = Series(dti_tz).astype(str)
expected = Series(
[
"2012-01-01 00:00:00-05:00",
"2012-01-02 00:00:00-05:00",
"2012-01-03 00:00:00-05:00",
],
dtype=object,
)
tm.assert_series_equal(result, expected)
def test_astype_datetime(self):
s = Series(iNaT, dtype="M8[ns]", index=range(5))
s = s.astype("O")
assert s.dtype == np.object_
s = Series([datetime(2001, 1, 2, 0, 0)])
s = s.astype("O")
assert s.dtype == np.object_
s = Series([datetime(2001, 1, 2, 0, 0) for i in range(3)])
s[1] = np.nan
assert s.dtype == "M8[ns]"
s = s.astype("O")
assert s.dtype == np.object_
def test_astype_datetime64tz(self):
s = Series(date_range("20130101", periods=3, tz="US/Eastern"))
# astype
result = s.astype(object)
expected = Series(s.astype(object), dtype=object)
tm.assert_series_equal(result, expected)
result = Series(s.values).dt.tz_localize("UTC").dt.tz_convert(s.dt.tz)
tm.assert_series_equal(result, s)
# astype - object, preserves on construction
result = Series(s.astype(object))
expected = s.astype(object)
tm.assert_series_equal(result, expected)
# astype - datetime64[ns, tz]
with tm.assert_produces_warning(FutureWarning):
# dt64->dt64tz astype deprecated
result = Series(s.values).astype("datetime64[ns, US/Eastern]")
tm.assert_series_equal(result, s)
with tm.assert_produces_warning(FutureWarning):
# dt64->dt64tz astype deprecated
result = Series(s.values).astype(s.dtype)
tm.assert_series_equal(result, s)
result = s.astype("datetime64[ns, CET]")
expected = Series(date_range("20130101 06:00:00", periods=3, tz="CET"))
tm.assert_series_equal(result, expected)
def test_astype_str_cast_dt64(self):
# see GH#9757
ts = Series([Timestamp("2010-01-04 00:00:00")])
s = ts.astype(str)
expected = Series(["2010-01-04"])
tm.assert_series_equal(s, expected)
ts = Series([Timestamp("2010-01-04 00:00:00", tz="US/Eastern")])
s = ts.astype(str)
expected = Series(["2010-01-04 00:00:00-05:00"])
tm.assert_series_equal(s, expected)
def test_astype_str_cast_td64(self):
# see GH#9757
td = Series([Timedelta(1, unit="d")])
ser = td.astype(str)
expected = Series(["1 days"])
tm.assert_series_equal(ser, expected)
def test_dt64_series_astype_object(self):
dt64ser = Series(date_range("20130101", periods=3))
result = dt64ser.astype(object)
assert isinstance(result.iloc[0], datetime)
assert result.dtype == np.object_
def test_td64_series_astype_object(self):
tdser = Series(["59 Days", "59 Days", "NaT"], dtype="timedelta64[ns]")
result = tdser.astype(object)
assert isinstance(result.iloc[0], timedelta)
assert result.dtype == np.object_
@pytest.mark.parametrize(
"data, dtype",
[
(["x", "y", "z"], "string"),
pytest.param(
["x", "y", "z"],
"arrow_string",
marks=td.skip_if_no("pyarrow", min_version="1.0.0"),
),
(["x", "y", "z"], "category"),
(3 * [Timestamp("2020-01-01", tz="UTC")], None),
(3 * [Interval(0, 1)], None),
],
)
@pytest.mark.parametrize("errors", ["raise", "ignore"])
def test_astype_ignores_errors_for_extension_dtypes(self, data, dtype, errors):
# https://github.com/pandas-dev/pandas/issues/35471
from pandas.core.arrays.string_arrow import ArrowStringDtype # noqa: F401
ser = Series(data, dtype=dtype)
if errors == "ignore":
expected = ser
result = ser.astype(float, errors="ignore")
tm.assert_series_equal(result, expected)
else:
msg = "(Cannot cast)|(could not convert)"
with pytest.raises((ValueError, TypeError), match=msg):
ser.astype(float, errors=errors)
@pytest.mark.parametrize("dtype", [np.float16, np.float32, np.float64])
def test_astype_from_float_to_str(self, dtype):
# https://github.com/pandas-dev/pandas/issues/36451
s = Series([0.1], dtype=dtype)
result = s.astype(str)
expected = Series(["0.1"])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"value, string_value",
[
(None, "None"),
(np.nan, "nan"),
(NA, "<NA>"),
],
)
def test_astype_to_str_preserves_na(self, value, string_value):
# https://github.com/pandas-dev/pandas/issues/36904
s = Series(["a", "b", value], dtype=object)
result = s.astype(str)
expected = Series(["a", "b", string_value], dtype=object)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", ["float32", "float64", "int64", "int32"])
def test_astype(self, dtype):
s = Series(np.random.randn(5), name="foo")
as_typed = s.astype(dtype)
assert as_typed.dtype == dtype
assert as_typed.name == s.name
@pytest.mark.parametrize("value", [np.nan, np.inf])
@pytest.mark.parametrize("dtype", [np.int32, np.int64])
def test_astype_cast_nan_inf_int(self, dtype, value):
# gh-14265: check NaN and inf raise error when converting to int
msg = "Cannot convert non-finite values \\(NA or inf\\) to integer"
s = Series([value])
with pytest.raises(ValueError, match=msg):
s.astype(dtype)
@pytest.mark.parametrize("dtype", [int, np.int8, np.int64])
def test_astype_cast_object_int_fail(self, dtype):
arr = Series(["car", "house", "tree", "1"])
msg = r"invalid literal for int\(\) with base 10: 'car'"
with pytest.raises(ValueError, match=msg):
arr.astype(dtype)
def test_astype_cast_object_int(self):
arr = Series(["1", "2", "3", "4"], dtype=object)
result = arr.astype(int)
tm.assert_series_equal(result, Series(np.arange(1, 5)))
def test_astype_unicode(self):
# see GH#7758: A bit of magic is required to set
# default encoding to utf-8
digits = string.digits
test_series = [
Series([digits * 10, tm.rands(63), tm.rands(64), tm.rands(1000)]),
Series(["データーサイエンス、お前はもう死んでいる"]),
]
former_encoding = None
if sys.getdefaultencoding() == "utf-8":
test_series.append(Series(["野菜食べないとやばい".encode()]))
for s in test_series:
res = s.astype("unicode")
expec = s.map(str)
tm.assert_series_equal(res, expec)
# Restore the former encoding
if former_encoding is not None and former_encoding != "utf-8":
reload(sys)
sys.setdefaultencoding(former_encoding)
def test_astype_bytes(self):
# GH#39474
result = Series(["foo", "bar", "baz"]).astype(bytes)
assert result.dtypes == np.dtype("S3")
class TestAstypeString:
@pytest.mark.parametrize(
"data, dtype",
[
([True, NA], "boolean"),
(["A", NA], "category"),
(["2020-10-10", "2020-10-10"], "datetime64[ns]"),
(["2020-10-10", "2020-10-10", NaT], "datetime64[ns]"),
(
["2012-01-01 00:00:00-05:00", NaT],
"datetime64[ns, US/Eastern]",
),
([1, None], "UInt16"),
(["1/1/2021", "2/1/2021"], "period[M]"),
(["1/1/2021", "2/1/2021", NaT], "period[M]"),
(["1 Day", "59 Days", NaT], "timedelta64[ns]"),
# currently no way to parse IntervalArray from a list of strings
],
)
def test_astype_string_to_extension_dtype_roundtrip(self, data, dtype, request):
if dtype == "boolean" or (
dtype in ("period[M]", "datetime64[ns]", "timedelta64[ns]") and NaT in data
):
mark = pytest.mark.xfail(
reason="TODO StringArray.astype() with missing values #GH40566"
)
request.node.add_marker(mark)
# GH-40351
s = Series(data, dtype=dtype)
tm.assert_series_equal(s, s.astype("string").astype(dtype))
class TestAstypeCategorical:
def test_astype_categorical_to_other(self):
cat = Categorical([f"{i} - {i + 499}" for i in range(0, 10000, 500)])
ser = Series(np.random.RandomState(0).randint(0, 10000, 100)).sort_values()
ser = cut(ser, range(0, 10500, 500), right=False, labels=cat)
expected = ser
tm.assert_series_equal(ser.astype("category"), expected)
tm.assert_series_equal(ser.astype(CategoricalDtype()), expected)
msg = r"Cannot cast object dtype to float64"
with pytest.raises(ValueError, match=msg):
ser.astype("float64")
cat = Series(Categorical(["a", "b", "b", "a", "a", "c", "c", "c"]))
exp = Series(["a", "b", "b", "a", "a", "c", "c", "c"])
tm.assert_series_equal(cat.astype("str"), exp)
s2 = Series(Categorical(["1", "2", "3", "4"]))
exp2 = Series([1, 2, 3, 4]).astype("int")
tm.assert_series_equal(s2.astype("int"), exp2)
# object don't sort correctly, so just compare that we have the same
# values
def cmp(a, b):
tm.assert_almost_equal(np.sort(np.unique(a)), np.sort(np.unique(b)))
expected = Series(np.array(ser.values), name="value_group")
cmp(ser.astype("object"), expected)
cmp(ser.astype(np.object_), expected)
# array conversion
tm.assert_almost_equal(np.array(ser), np.array(ser.values))
tm.assert_series_equal(ser.astype("category"), ser)
tm.assert_series_equal(ser.astype(CategoricalDtype()), ser)
roundtrip_expected = ser.cat.set_categories(
ser.cat.categories.sort_values()
).cat.remove_unused_categories()
result = ser.astype("object").astype("category")
tm.assert_series_equal(result, roundtrip_expected)
result = ser.astype("object").astype(CategoricalDtype())
tm.assert_series_equal(result, roundtrip_expected)
def test_astype_categorical_invalid_conversions(self):
# invalid conversion (these are NOT a dtype)
cat = Categorical([f"{i} - {i + 499}" for i in range(0, 10000, 500)])
ser = Series(np.random.randint(0, 10000, 100)).sort_values()
ser = cut(ser, range(0, 10500, 500), right=False, labels=cat)
msg = (
"dtype '<class 'pandas.core.arrays.categorical.Categorical'>' "
"not understood"
)
with pytest.raises(TypeError, match=msg):
ser.astype(Categorical)
with pytest.raises(TypeError, match=msg):
ser.astype("object").astype(Categorical)
def test_astype_categoricaldtype(self):
s = Series(["a", "b", "a"])
result = s.astype(CategoricalDtype(["a", "b"], ordered=True))
expected = Series(Categorical(["a", "b", "a"], ordered=True))
tm.assert_series_equal(result, expected)
result = s.astype(CategoricalDtype(["a", "b"], ordered=False))
expected = Series(Categorical(["a", "b", "a"], ordered=False))
tm.assert_series_equal(result, expected)
result = s.astype(CategoricalDtype(["a", "b", "c"], ordered=False))
expected = Series(
Categorical(["a", "b", "a"], categories=["a", "b", "c"], ordered=False)
)
tm.assert_series_equal(result, expected)
tm.assert_index_equal(result.cat.categories, Index(["a", "b", "c"]))
@pytest.mark.parametrize("name", [None, "foo"])
@pytest.mark.parametrize("dtype_ordered", [True, False])
@pytest.mark.parametrize("series_ordered", [True, False])
def test_astype_categorical_to_categorical(
self, name, dtype_ordered, series_ordered
):
# GH#10696, GH#18593
s_data = list("abcaacbab")
s_dtype = CategoricalDtype(list("bac"), ordered=series_ordered)
s = Series(s_data, dtype=s_dtype, name=name)
# unspecified categories
dtype = CategoricalDtype(ordered=dtype_ordered)
result = s.astype(dtype)
exp_dtype = CategoricalDtype(s_dtype.categories, dtype_ordered)
expected = Series(s_data, name=name, dtype=exp_dtype)
tm.assert_series_equal(result, expected)
# different categories
dtype = CategoricalDtype(list("adc"), dtype_ordered)
result = s.astype(dtype)
expected = Series(s_data, name=name, dtype=dtype)
tm.assert_series_equal(result, expected)
if dtype_ordered is False:
# not specifying ordered, so only test once
expected = s
result = s.astype("category")
tm.assert_series_equal(result, expected)
def test_astype_bool_missing_to_categorical(self):
# GH-19182
s = Series([True, False, np.nan])
assert s.dtypes == np.object_
result = s.astype(CategoricalDtype(categories=[True, False]))
expected = Series(Categorical([True, False, np.nan], categories=[True, False]))
tm.assert_series_equal(result, expected)
def test_astype_categories_raises(self):
# deprecated GH#17636, removed in GH#27141
s = Series(["a", "b", "a"])
with pytest.raises(TypeError, match="got an unexpected"):
s.astype("category", categories=["a", "b"], ordered=True)
@pytest.mark.parametrize("items", [["a", "b", "c", "a"], [1, 2, 3, 1]])
def test_astype_from_categorical(self, items):
ser = Series(items)
exp = Series(Categorical(items))
res = ser.astype("category")
tm.assert_series_equal(res, exp)
def test_astype_from_categorical_with_keywords(self):
# with keywords
lst = ["a", "b", "c", "a"]
ser = Series(lst)
exp = Series(Categorical(lst, ordered=True))
res = ser.astype(CategoricalDtype(None, ordered=True))
from db.DBConnector import execute_query
from utils.log import log_init, log_close, log
import time
from os import path
from pathlib import Path
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
# the ids are not included as they are the same for every table: id : long
classMetricsEntities = ["classAnonymousClassesQty",
"classAssignmentsQty",
"classComparisonsQty",
"classLambdasQty",
"classLoopQty",
"classMathOperationsQty",
"classMaxNestedBlocks",
"classNosi",
"classNumberOfAbstractMethods",
"classNumberOfDefaultFields",
"classNumberOfDefaultMethods",
"classNumberOfFields",
"classNumberOfFinalFields",
"classNumberOfFinalMethods",
"classNumberOfMethods",
"classNumberOfPrivateFields",
"classNumberOfPrivateMethods",
"classNumberOfProtectedFields",
"classNumberOfProtectedMethods",
"classNumberOfPublicMethods",
"classNumberOfStaticFields",
"classNumberOfStaticMethods",
"classNumberOfSynchronizedMethods",
"classNumbersQty",
"classParenthesizedExpsQty",
"classReturnQty",
"classStringLiteralsQty",
"classSubClassesQty",
"classTryCatchQty",
"classVariablesQty"]
classMetricsComplex = ["classCbo",
"classLcom",
"classLoc",
"classRfc",
"classWmc",
"classTCC",
"classLCC"]
processMetricsFields = ["bugFixCount",
"qtyMajorAuthors",
"qtyMinorAuthors",
"qtyOfAuthors",
"qtyOfCommits"]
methodMetricsSize = ["methodLoc",
"methodUniqueWordsQty"]
methodMetrics = ["methodAnonymousClassesQty",
"methodAssignmentsQty",
"methodCbo",
"methodComparisonsQty",
"methodLambdasQty",
"methodLoopQty",
"methodMathOperationsQty",
"methodMaxNestedBlocks",
"methodNumbersQty",
"methodParametersQty",
"methodParenthesizedExpsQty",
"methodReturnQty",
"methodRfc",
"methodStringLiteralsQty",
"methodSubClassesQty",
"methodTryCatchQty",
"methodVariablesQty",
"methodWmc"]
def plot_bar(data, x_labels, group_labels, scale: str = "linear", title: str = ""):
fig, ax = plt.subplots(figsize=(max(len(x_labels), 6), 12), dpi=120)
barWidth = 1 / (len(group_labels) + 1)
x = np.arange(len(x_labels))
for index, row in data.iterrows():
ax.bar(x + index * barWidth, row, width=barWidth, edgecolor='white', label=group_labels[index])
plt.xticks(x, x_labels, rotation='30')
ax.set_ylabel('AVG')
ax.set_title(title)
plt.yscale(scale)
ax.legend()
fig_path = f"results/Metrics/{title}_{scale}_mean.png"
plt.savefig(fig_path)
def plot_box(data, label, title, scale: str = "log"):
fig, ax = plt.subplots(figsize=(max(len(label), 32), 12), dpi=120)
ax.set_title(f"{title}")
ax.boxplot(data, showfliers=False)
plt.xticks(np.arange(1, len(label) + 1), label, rotation=30)
plt.yscale(scale)
plt.xlabel("Metrics")
fig_path = f"results/Metrics/{title}_{scale}_box_plot.png"
plt.savefig(fig_path)
log(f"Saved box plot to {fig_path}")
def query_avg(table_name: str, function: str, metrics, descriptor: str, group: bool):
file_path = f"results/Metrics/{table_name}_{function}_{descriptor}.csv"
Path(path.dirname(file_path)).mkdir(parents=True, exist_ok=True)
if not path.exists(file_path):
metrics_query = ', '.join([f"{function}({metric}) AS \"{metric}\"" for metric in metrics])
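# e.g. function="AVG", metrics=["classLoc"] produces: AVG(classLoc) AS "classLoc"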
if group:
query = f"SELECT {metrics_query} FROM {table_name} group by level"
else:
query = f"SELECT {metrics_query} FROM {table_name}"
dataframe = execute_query(query)
dataframe.to_csv(file_path, index=False)
log(f"Got the data from {table_name} for these metrics: {metrics} for the aggregate function: {function}.")
else:
dataframe = pd.read_csv(file_path)
#!/usr/bin/env python3
'''
FILE: nav01_parser.py
DESCRIPTION: Nav01 parser class for raw output from a Furuno GP-90D GPS receiver
Data file contains GGA/ZDA/VTG NMEA0183 sentences with no additional
information added.
BUGS:
NOTES:
AUTHOR: <NAME>
COMPANY: OceanDataTools
VERSION: 0.2
CREATED: 2021-04-24
REVISION: 2021-05-05
LICENSE INFO: This code is licensed under MIT license (see LICENSE.txt for details)
Copyright (C) OceanDataTools 2021
'''
import re
import csv
import sys
import logging
from datetime import datetime, timedelta
from os.path import dirname, realpath
sys.path.append(dirname(dirname(realpath(__file__))))
import pandas as pd
from geopy import Point
from geopy.distance import distance
from lib.nav_manager import NavParser, R2RNAV_COLS
from lib.utils import calculate_bearing
DESCRIPTION = "Nav parser for raw output from a Furuno GP-90D GPS reciever. Data file contains GGA/ZDA/VTG NMEA0183 sentences with no additional information added."
EXAMPLE_DATA = """
$GPGGA,123034,2447.9660,N,12221.8670,E,2,9,0.3,38,M,,M,,*40
$GPVTG,147.2,T,150.9,M,7.6,N,14.1,K*76
$GPZDA,123034,23,08,2009,00,00*4D
$GPGGA,123035,2447.9641,N,12221.8681,E,2,9,0.4,38,M,,M,,*4B
$GPVTG,147.2,T,150.9,M,7.6,N,14.1,K*76
$GPZDA,123035,23,08,2009,00,00*4C
"""
raw_gga_cols = ['hdr','sensor_time','latitude','NS','longitude','EW','nmea_quality','nsv','hdop','antenna_height','antenna_height_m','height_wgs84','height_wgs84_m','last_update','dgps_station_checksum']
raw_vtg_cols = ['hdr','heading_true','True','heading_mag','Mag','speed_kts','Knots','speed_kph','Kph_checksum']
raw_zda_cols = ['hdr','sensor_time','day','month','year','tz_hr','tz_min_checksum']
SENSOR_TIMESTAMP_FORMAT = "%H%M%S"
class Nav01Parser(NavParser):
'''
Parser class for raw output from a Furuno GP-90D GPS receiver. Data file
contains GGA/ZDA/VTG NMEA0183 sentences with no additional information
added.
'''
def __init__(self):
super().__init__(name="nav01", description=DESCRIPTION, example_data=EXAMPLE_DATA)
@staticmethod
def _hemisphere_correction(coordinate, hemisphere):
if hemisphere in ('W', "S"):
return coordinate * -1.0
return coordinate
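# e.g. _hemisphere_correction(24.7994, 'N') -> 24.7994, _hemisphere_correction(122.3645, 'W') -> -122.3645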
@staticmethod
def _verify_checksum(sentence):
cksum = sentence[len(sentence) - 2:]
chksumdata = re.sub("(\n|\r\n)","", sentence[sentence.find("$")+1:sentence.find("*")])
csum = 0
for char in chksumdata:
# XOR'ing value of csum against the next char in line
# and storing the new XOR value in csum
csum ^= ord(char)
return 1 if hex(csum) == hex(int(cksum, 16)) else 0
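# returns 1 when the XOR of every character between '$' and '*' equals the two hex digits
# that follow '*' (e.g. the trailing "*40" in the first $GPGGA line of EXAMPLE_DATA), else 0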
def parse_file(self, filepath): # pylint: disable=too-many-locals,too-many-branches,too-many-statements
"""
Process the provided file
"""
zda_into_df = { 'lineno': [], 'date': [] }
vtg_into_df = { 'lineno': [], 'speed_made_good': [], 'course_made_good': [] }
gga_into_df = { 'lineno': [], 'sensor_time': [], 'ship_latitude': [], 'ship_longitude': [], 'nmea_quality': [], 'nsv': [], 'hdop': [], 'antenna_height': [], 'valid_cksum': [], 'valid_parse': [] }
try:
with open(filepath, 'r') as csvfile:
csv_reader = csv.reader(csvfile)
for row in csv_reader:
if row[0] == '$GPZDA':
date = None
if len(row) != len(raw_zda_cols):
logging.warning("Parsing Error: (line: %s) %s", csv_reader.line_num, ','.join(row))
else:
try:
date = datetime.strptime(row[4] + row[3] + row[2], "%Y%m%d")
except Exception as err:
logging.warning("Parsing Error: (line: %s) %s", csv_reader.line_num, ','.join(row))
logging.debug(str(err))
zda_into_df['lineno'].append(csv_reader.line_num)
zda_into_df['date'].append(date)
elif row[0] == '$GPVTG':
speed_made_good = None
course_made_good = None
if len(row) != len(raw_vtg_cols):
logging.warning("Parsing Error: (line: %s) %s", csv_reader.line_num, ','.join(row))
else:
try:
speed_made_good = float(row[7])*1000/3600
course_made_good = float(row[1])
except Exception as err:
logging.warning("Parsing Error: (line: %s) %s", csv_reader.line_num, ','.join(row))
logging.debug(str(err))
vtg_into_df['lineno'].append(csv_reader.line_num)
vtg_into_df['speed_made_good'].append(speed_made_good)
vtg_into_df['course_made_good'].append(course_made_good)
elif row[0] == '$GPGGA':
sensor_time = None
ship_latitude = None
ship_longitude = None
nmea_quality = None
nsv = None
hdop = None
antenna_height = None
valid_cksum = None
valid_parse = 0
if len(row) != len(raw_gga_cols):
logging.warning("Parsing Error 1: (line: %s) %s", csv_reader.line_num, ','.join(row))
else:
try:
sensor_time = datetime.strptime(row[1], SENSOR_TIMESTAMP_FORMAT)
ship_latitude = self._hemisphere_correction(float(row[2][:2]) + float(row[2][2:])/60, row[3])
ship_longitude = self._hemisphere_correction(float(row[4][:3]) + float(row[4][3:])/60, row[5])
nmea_quality = int(row[6])
nsv = int(row[7])
hdop = float(row[8])
antenna_height = float(row[9])
valid_cksum = self._verify_checksum(','.join(row))
valid_parse = 1
except Exception as err:
logging.warning("Parsing Error 2: (line: %s) %s", csv_reader.line_num, ','.join(row))
logging.debug(str(err))
gga_into_df['lineno'].append(csv_reader.line_num)
gga_into_df['sensor_time'].append(sensor_time)
gga_into_df['ship_latitude'].append(ship_latitude)
gga_into_df['ship_longitude'].append(ship_longitude)
gga_into_df['nmea_quality'].append(nmea_quality)
gga_into_df['nsv'].append(nsv)
gga_into_df['hdop'].append(hdop)
gga_into_df['antenna_height'].append(antenna_height)
gga_into_df['valid_cksum'].append(valid_cksum)
gga_into_df['valid_parse'].append(valid_parse)
except Exception as err:
logging.error("Problem accessing input file: %s", filepath)
logging.error(str(err))
return None
zda_df = pd.DataFrame(zda_into_df)
del zda_into_df
zda_df.drop_duplicates(subset=['date'], keep='first', inplace=True)
vtg_df = pd.DataFrame(vtg_into_df)
del vtg_into_df
gga_df = pd.DataFrame(gga_into_df)
del gga_into_df
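        # Merge strategy, as read from the two merge_asof calls below: each GGA
        # fix picks up the first VTG record at or after its line number
        # (direction="forward"), and then the most recent ZDA date at or before
        # it (merge_asof's default "backward" direction).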
# Merge GGA with VTG to create data
data = pd.merge_asof(gga_df, vtg_df, on="lineno", direction="forward")
# Merge data with ZDA
data = | pd.merge_asof(data, zda_df, on="lineno") | pandas.merge_asof |
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
RMDL: Random Multimodel Deep Learning for Classification
* Copyright (C) 2018 <NAME> <<EMAIL>>
* Last Update: May 3rd, 2018
* This file is part of RMDL project, University of Virginia.
* Free to use, change, share and distribute source code of RMDL
* Referenced paper : RMDL: Random Multimodel Deep Learning for Classification
* Referenced paper : An Improvement of Data Classification using Random Multimodel Deep Learning (RMDL)
* Comments and Error: email: <EMAIL>
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
import os
from RMDL import text_feature_extraction as txt
from sklearn.model_selection import train_test_split
from RMDL.Download import Download_WOS as WOS
import numpy as np
import pandas as pd
import tensorflow as tf
from RMDL import RMDL_Text as RMDL
from tensorflow.keras.preprocessing.sequence import pad_sequences
from sklearn.utils import shuffle
def load_data(file_name, sample_ratio=1, n_class=15, one_hot=True):
'''load data from .csv file'''
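    # Assumes a headerless CSV whose three columns are, in order: class label,
    # title, content (names= below assigns these names rather than reading a
    # header row).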
names = ["class", "title", "content"]
csv_file = pd.read_csv(file_name, names=names)
shuffle_csv = csv_file.sample(frac=sample_ratio)
x = pd.Series(shuffle_csv["content"])
y = | pd.Series(shuffle_csv["class"]) | pandas.Series |
import os
import logging
from collections import defaultdict
import pandas as pd
from fol.foq_v2 import (concate_n_chains, copy_query,
negation_sink,
binary_formula_iterator,
concate_iu_chains,
parse_formula,
decompose_D, to_D,
union_bubble,
DeMorgan_replacement,
to_d,
transformation)
def convert_log_to_csv(logfile, outfile):
already = False
if os.path.exists(outfile):
already = True
already_df = pd.read_csv(outfile)
formula_id_set = set(already_df.formula_id)
original_set = set(already_df.original)
outfile = outfile.replace(".csv", "_extend.csv")
formula_id_set = set()
original_set = ()
data_dict = defaultdict(list)
with open(logfile, 'rt') as f:
for line in f.readlines():
line = line.strip()
*_, rtype, schema, data = line.split(":")
row_data = dict()
if rtype == 'record':
for k, v in zip(schema.split('\t'), data.split('\t')):
row_data[k.strip()] = v.strip()
if row_data['original'] in original_set:
continue
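                # If this formula_id collides with one already seen, bump the
                # numeric suffix until a free "typeNNNN" id is found, and record
                # the new id so later rows cannot reuse it.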
if row_data['formula_id'] in formula_id_set:
num = int(row_data['formula_id'][-4:])
while True:
new_key = f"type{num+1:04d}"
if new_key not in formula_id_set:
row_data['formula_id'] = new_key
formula_id_set.add(new_key)
break
num += 1
for k in row_data:
data_dict[k].append(row_data[k])
df = | pd.DataFrame(data_dict) | pandas.DataFrame |
import json
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import NearestNeighbors
def cluster(cohort_submissions: dict) -> list:
"""
Splits given dict into clusters of 4 based on their ranked complexity
The 'remainder problem' of needing to have 4 submissions per cluster,
regardless of number of submissions, is solved here by duplicating
submission IDs across clusters to supplement any clusters with less than 4,
with no clusters containing more than 1 submission_ID that also
appears in another cluster, unless there are fewer than 3 total
submissions, or exactly 5 submissions, in the given cohort.
Input: dictionary of a single cohort containing nested dictionary
with 'submission_id' as first level key,
and 'complexity' as one of the inner keys
Output: JSON object of clusters:
{1: [list of submission_ids], 2: [list of submission_ids]}
"""
# Generate DataFrame from dict
df = | pd.DataFrame.from_dict(cohort_submissions, orient="index") | pandas.DataFrame.from_dict |
# To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %%
# first, REMEMBER to activate cryptoalgowheel-S2 environment!
# %%
import datetime
import os
import sys
import backtrader as bt
import numpy as np
import pandas as pd
import matplotlib
import PyQt5
# %%
#*****WARNING: REVISE THE "dir" FOLDER PATHS!!!
datadir = "./data"
logdir = "./log"
reportdir = "./report"
datafile = "BTC_USDT_1h.csv" #!NOTICE: use our data "BTC_USDT_1h.csv" here
from_datetime = "2020-01-01 00:00:00"
to_datetime = "2020-04-01 00:00:00"
# %%
class OptDoubleSMACross(bt.Strategy):
params = (
("pfast", 10),
("pslow", 20),
)
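    # Default periods for the fast/slow SMAs; if this strategy is run through a
    # parameter optimization (as the per-round log in stop() below suggests),
    # these defaults are the values overridden on each pass.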
def log(self, txt, dt=None, doprint=False): #(by default don't print log here)
if doprint:
dt = dt or self.datas[0].datetime.date(0)
print("%s, %s" % (dt.isoformat(), txt))
def __init__(self):
self.dataclose = self.datas[0].close
# add both "fast" and "slow" SimpleMovingAverage indicators
self.fastsma = bt.indicators.SimpleMovingAverage(self.datas[0], period = self.params.pfast)
self.slowsma = bt.indicators.SimpleMovingAverage(self.datas[0], period = self.params.pslow)
# add a "CrossOver" signal!!
self.crossover = bt.indicators.CrossOver(self.fastsma, self.slowsma) #NOTICE here passing in "fast" SMA as 1st line, "slow" SMA as 2nd line
#["CrossOver" indicator Usage reference: https://www.backtrader.com/home/helloalgotrading/; documentation: https://www.backtrader.com/docu/indautoref/#crossover]
def next(self):
if not self.position: #if not in the market yet (no "position" yet)
if self.crossover > 0: # "CrossOver" function return 1.0: meaning "fast SMA"(1st line) crosses the "slow SMA"(2nd line) upwards
#--BUY!
self.buy()
else: #("already in the market")
if self.crossover < 0: #"CrossOver" function return -1.0: meaning "fast SMA"(1st line) crosses the "slow SMA"(2nd line) downwards
#--SELL!
self.sell()
#*** added "Strategy hook" here - "stop" method, in order to record the portfolio final net value of each optimization round:
def stop(self):
self.log("Fast SMA Period %2d, Slow SMA Period %2d: Ending Value %.2f" %
(self.params.pfast, self.params.pslow, self.broker.getvalue()), doprint=True) #(do print the log message by the end of each optimization round here)
# %%
if __name__ == "__main__":
cerebro = bt.Cerebro()
# feed data:
data = pd.read_csv(os.path.join(datadir, datafile), index_col="datetime", parse_dates=True)
data = data.loc[(data.index >= | pd.to_datetime(from_datetime) | pandas.to_datetime |
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import torch
from mbpert.loss import reg_loss_interaction, reg_loss_r, reg_loss_eps
from mbpert.mbpert import MBPertDataset, MBPert
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from scipy import stats
if __name__ == '__main__':
trainset = MBPertDataset("data/x0_train.txt",
"data/x_ss_train.txt",
"data/P_train.txt")
testset = MBPertDataset("data/x0_test.txt",
"data/x_ss_test.txt",
"data/P_test.txt")
trainloader = DataLoader(trainset, batch_size=32, shuffle=True)
testloader = DataLoader(testset, batch_size=32, shuffle=True)
n_species = trainset.n_species
mbpert = MBPert(n_species)
criterion = torch.nn.MSELoss()
optimizer = torch.optim.Adam(mbpert.parameters())
# Log history
writer = SummaryWriter()
for epoch in range(500):
running_loss = 0.0 # printing loss statistics per batch
epoch_loss = 0.0 # plotting training loss curve
for i, data in enumerate(trainloader, 0):
# Get the input batch
(x0, p), responses = data
# Get the inputs that model.forward() accepts
x0 = torch.ravel(x0)
p = torch.t(p)
# and responses
x_ss = torch.ravel(responses)
# Forward pass
x_pred = mbpert(x0, p)
# Compute loss (MSE + reg)
loss = criterion(x_pred, x_ss)
loss = loss + reg_loss_interaction(mbpert.A) + reg_loss_r(mbpert.r) + reg_loss_eps(mbpert.eps)
# Zero gradients, perform a backward pass, and update parameters
optimizer.zero_grad()
loss.backward()
optimizer.step()
# print statistics
running_loss += loss.item()
if i % 5 == 4: # print every 5 mini-batches
print('[%d, %5d] loss: %.3f' %
(epoch + 1, i + 1, running_loss / 5))
running_loss = 0.0
epoch_loss += loss.item()
# Log epoch loss (per batch)
writer.add_scalar("Loss/train", epoch_loss/(i+1), epoch)
# Log test set loss
epoch_loss_test = 0.0
with torch.no_grad():
for i, testdata in enumerate(testloader, 0):
(x0, p), responses = testdata
x0 = torch.ravel(x0)
p = torch.t(p)
x_ss = torch.ravel(responses)
x_pred = mbpert(x0, p)
# Compute loss (MSE + reg)
loss = criterion(x_pred, x_ss)
loss = loss + reg_loss_interaction(mbpert.A) + reg_loss_r(mbpert.r) + reg_loss_eps(mbpert.eps)
epoch_loss_test += loss.item()
writer.add_scalar("Loss/test", epoch_loss_test/(i+1), epoch)
writer.flush()
writer.close()
# Comparing true and predicted A, r, eps
A = np.loadtxt("data/A.txt", dtype=np.float32)
r = np.loadtxt("data/r.txt", dtype=np.float32)
eps = np.loadtxt("data/eps.txt", dtype=np.float32)
A_error_heatmap = sns.heatmap(np.abs(A - mbpert.A.detach().numpy()), center=0, annot=True, fmt='.2f')
A_error_heatmap = A_error_heatmap.set_title("Absolute error for A")
A_error_heatmap.get_figure().savefig("data/figs/A_error_heatmap.png")
r_df = pd.DataFrame(data={'pred': mbpert.r.detach().numpy(),
'true': r,
'param': 'r'})
eps_df = pd.DataFrame(data={'pred': mbpert.eps.detach().numpy(),
'true': eps,
'param': 'eps'})
fig, ax = plt.subplots(figsize=(6, 4))
r_eps_pred_vs_true = sns.scatterplot(data=pd.concat([r_df, eps_df]), x='pred', y='true', hue='param', ax=ax)
r_eps_pred_vs_true = sns.lineplot(x=np.linspace(-0.5, 1.5), y=np.linspace(-0.5, 1.5), color='g', ax=ax)
r_eps_pred_vs_true.get_figure().savefig("data/figs/r_eps_pred_vs_true.png")
# Plot predicted vs true steady states in test set
x_ss_test = []
x_pred_test = []
with torch.no_grad():
for i, testdata in enumerate(testloader, 0):
(x0, p), responses = testdata
x0 = torch.ravel(x0)
p = torch.t(p)
x_ss = torch.ravel(responses)
x_pred = mbpert(x0, p)
x_ss_test.append(x_ss)
x_pred_test.append(x_pred)
x_ss_test = torch.cat(x_ss_test, dim=0)
x_pred_test = torch.cat(x_pred_test, dim=0)
x_df = | pd.DataFrame(data={'pred': x_pred_test, 'true': x_ss_test, 'value': 'x'}) | pandas.DataFrame |
from __future__ import print_function, absolute_import, division
import pandas as pd
import numpy as np
import argparse
import json
import math
import re
import os
import sys
import csv
import socket # -- ip checks
import seaborn as sns
import matplotlib.pyplot as plt
from jinja2 import Environment, PackageLoader
# --- functions ---
def get_config(config):
""" convert json config file into a python dict """
with open(config, 'r') as f:
config_dict = json.load(f)[0]
return config_dict
# -- load data --
def get_dataframe(config):
""" load csv into python dataframe """
df = pd.read_csv(config['input_file'], low_memory=False)
return df
# --
def get_overview(config, df):
""" return details of the dataframe and any issues found """
overview_msg = {}
df = df.copy()
column_cnt = len(df.columns)
try:
df['EVENT_TIMESTAMP'] = pd.to_datetime(df[config['required_features']['EVENT_TIMESTAMP']], infer_datetime_format=True)
date_range = df['EVENT_TIMESTAMP'].min().strftime('%Y-%m-%d') + ' to ' + df['EVENT_TIMESTAMP'].max().strftime('%Y-%m-%d')
day_cnt = (df['EVENT_TIMESTAMP'].max() - df['EVENT_TIMESTAMP'].min()).days
except:
overview_msg[config['required_features']['EVENT_TIMESTAMP']] = " Unable to convert" + config['required_features']['EVENT_TIMESTAMP'] + " to timestamp"
date_range = ""
day_cnt = 0
record_cnt = df.shape[0]
memory_size = df.memory_usage(index=True).sum()
record_size = round(float(memory_size) / record_cnt,2)
n_dupe = record_cnt - len(df.drop_duplicates())
if record_cnt <= 10000:
overview_msg["Record count"] = "A minimum of 10,000 rows are required to train the model, your dataset contains " + str(record_cnt)
overview_stats = {
"Record count" : "{:,}".format(record_cnt) ,
"Column count" : "{:,}".format(column_cnt),
"Duplicate count" : "{:,}".format(n_dupe),
"Memory size" : "{:.2f}".format(memory_size/1024**2) + " MB",
"Record size" : "{:,}".format(record_size) + " bytes",
"Date range" : date_range,
"Day count" : "{:,}".format(day_cnt) + " days",
"overview_msg" : overview_msg,
"overview_cnt" : len(overview_msg)
}
return df, overview_stats
def set_feature(row, config):
""" sets the feature type of each variable in the file, identifies features with issues
        as well as the required features. This is the first pass of rules.
"""
rulehit = 0
feature = ""
message = ""
required_features = config['required_features']
# -- assign numeric --
if ((row._dtype in ['float64', 'int64']) and (row['nunique'] > 1)):
feature = "numeric"
message = "(" + "{:,}".format(row['nunique']) + ") unique"
# -- assign categorical --
if ((row._dtype == 'object') and ( row.nunique_pct <= 0.75)):
feature = "categorical"
message = "(" + "{:.2f}".format(row.nunique_pct*100) + "%) unique"
# -- assign categorical to numerics --
if ((row._dtype in ['float64', 'int64']) and ( row['nunique'] <= 1024 )):
feature = "categorical"
message = "(" + "{:,}".format(row['nunique']) + ") unique"
# -- assign binary --
if (row['nunique'] == 2 ):
feature = "categorical"
message = "(" + "{:}".format(row['nunique']) + ") binary"
# -- single value --
if (row['nunique'] == 1):
rulehit = 1
feature = "exclude"
message = "(" + "{:}".format(row['nunique']) + ") single value"
# -- null pct --
if (row.null_pct >= 0.50 and (rulehit == 0)):
rulehit = 1
feature = "exclude"
message = "(" + "{:.2f}".format(row.null_pct*100) + "%) missing "
# -- categorical w. high % unique
if ((row._dtype == 'object') and ( row.nunique_pct >= 0.75)) and (rulehit == 0):
rulehit = 1
feature = "exclude"
message = "(" + "{:.2f}".format(row.nunique_pct*100) + "%) unique"
    # -- numeric w. extreme % unique
if ((row._dtype in ['float64', 'int64']) and ( row.nunique_pct >= 0.95)) and (rulehit == 0):
rulehit = 1
feature = "exclude"
message = "(" + "{:.2f}".format(row.nunique_pct*100) + "%) unique"
if ('EMAIL_ADDRESS' in required_features) and (row._column == required_features['EMAIL_ADDRESS']):
feature = "EMAIL_ADDRESS"
if ('IP_ADDRESS' in required_features) and (row._column == required_features['IP_ADDRESS']):
feature = "IP_ADDRESS"
if row._column == required_features['EVENT_TIMESTAMP']:
feature = "EVENT_TIMESTAMP"
if row._column == required_features['EVENT_LABEL']:
feature = "EVENT_LABEL"
return feature, message
def get_label(config, df):
""" returns stats on the label and performs intial label checks """
message = {}
label = config['required_features']['EVENT_LABEL']
label_summary = df[label].value_counts()
rowcnt = df.shape[0]
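    # Assumes fraud is the minority class: idxmin()/min() below pick the rarer
    # label as fraud and idxmax()/max() the more frequent one as legit.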
label_dict = {
"label_field" : label,
"label_values" : df[label].unique(),
"label_dtype" : label_summary.dtype,
"fraud_rate" : "{:.2f}".format((label_summary.min()/label_summary.sum())*100),
"fraud_label": str(label_summary.idxmin()),
"fraud_count": label_summary.min(),
"legit_rate" : "{:.2f}".format((label_summary.max()/label_summary.sum())*100),
"legit_count": label_summary.max(),
"legit_label": str(label_summary.idxmax()),
"null_count" : "{:,}".format(df[label].isnull().sum(axis = 0)),
"null_rate" : "{:.2f}".format(df[label].isnull().sum(axis = 0)/rowcnt),
}
"""
label checks
"""
if label_dict['fraud_count'] <= 500:
message['fraud_count'] = "Fraud count " + str(label_dict['fraud_count']) + " is less than 500\n"
if df[label].isnull().sum(axis = 0)/rowcnt >= 0.01:
message['label_nulls'] = "Your LABEL column contains " + label_dict["null_count"] +" a significant number of null values"
label_dict['warnings'] = len(message)
return label_dict, message
def get_partition(config, df):
""" evaluates your dataset partitions and checks the distribution of fraud lables """
df = df.copy()
row_count = df.shape[0]
required_features = config['required_features']
message = {}
stats ={}
try:
df['_event_timestamp'] = pd.to_datetime(df[required_features['EVENT_TIMESTAMP']])
df['_dt'] = pd.to_datetime(df['_event_timestamp'].dt.date)
except:
message['_event_timestamp'] = "could not parse " + required_features['EVENT_TIMESTAMP'] + " into a date or timestamp object"
df['_event_timestamp'] = df[required_features['EVENT_TIMESTAMP']]
df['_dt'] = df['_event_timestamp']
label_summary = df[required_features['EVENT_LABEL']].value_counts()
legit_label = label_summary.idxmax()
fraud_label = label_summary.idxmin()
df = df.sort_values(by=['_event_timestamp']).reset_index(drop=True)
ctab = pd.crosstab(df['_dt'].astype(str), df[required_features['EVENT_LABEL']]).reset_index()
stats['labels'] = ctab['_dt'].tolist()
stats['legit_rates'] = ctab[legit_label].tolist()
stats['fraud_rates'] = ctab[fraud_label].tolist()
# -- set partitions --
df['partition'] = 'training'
df.loc[math.ceil(row_count*.7):math.ceil(row_count*.85),'partition'] = 'evaluation'
df.loc[math.ceil(row_count*.85):,'partition'] = 'testing'
message = ""
return stats, message
def get_stats(config, df):
""" generates the key column analysis statistics calls set_features function """
df = df.copy()
rowcnt = len(df)
df_s1 = df.agg(['count', 'nunique',]).transpose().reset_index().rename(columns={"index":"_column"})
df_s1['count'] = df_s1['count'].astype('int64')
df_s1['nunique'] = df_s1['nunique'].astype('int64')
df_s1["null"] = (rowcnt - df_s1["count"]).astype('int64')
df_s1["not_null"] = rowcnt - df_s1["null"]
df_s1["null_pct"] = df_s1["null"] / rowcnt
df_s1["nunique_pct"] = df_s1['nunique'] / rowcnt
dt = pd.DataFrame(df.dtypes).reset_index().rename(columns={"index":"_column", 0:"_dtype"})
df_stats = | pd.merge(dt, df_s1, on='_column', how='inner') | pandas.merge |
### ETL script for generating input tables to model
### main point: ETL JHU covid-19 case and mortality data
# todo: refactor
## HERE! -> not handled here in python -> Serial Interval Table -> would be worthwhile to reproduce the R for that here
## "serial interval table" <--> that discretized gamma distribution
## so that this script does indeed produce all the required input tables for the model
## alternatively, could just generate that discretized gamma distribution in the R code itself, pre-simulation
## I don't like that idea -> will try to reproduce results in python - but not now -> other more pressing tasks now
import os
import numpy as np
import warnings
warnings.simplefilter(
action="ignore", category=FutureWarning
) # suppress pandas "future warning"
import pandas as pd
def makeCaseMortalityTable(dirPath):
print("\n~ COVID-19 CASE-MORTALITY TABLE ~")
# E
print("--- extracting JHU covid-19 case and mortality data ---")
# what's the issue here? sometimes stalls for some reason -> fetching data from git
# fetch the JHU time-series data
# see: https://github.com/CSSEGISandData/COVID-19/blob/master/csse_covid_19_data/csse_covid_19_time_series/README.md
jhu = "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/"
cases_csv = "time_series_covid19_confirmed_US.csv"
deaths_csv = "time_series_covid19_deaths_US.csv"
casesOrig = pd.read_csv(jhu + cases_csv)
deathsOrig = | pd.read_csv(jhu + deaths_csv) | pandas.read_csv |
import json
import logging
import pandas as pd
import requests
from gamestonk_terminal import config_terminal as cfg
from gamestonk_terminal.decorators import log_start_end
from gamestonk_terminal.rich_config import console
logger = logging.getLogger(__name__)
api_url = "https://api.glassnode.com/v1/metrics/"
GLASSNODE_SUPPORTED_HASHRATE_ASSETS = ["BTC", "ETH"]
GLASSNODE_SUPPORTED_EXCHANGES = [
"aggregated",
"binance",
"bittrex",
"coinex",
"gate.io",
"gemini",
"huobi",
"kucoin",
"poloniex",
"bibox",
"bigone",
"bitfinex",
"hitbtc",
"kraken",
"okex",
"bithumb",
"zb.com",
"cobinhood",
"bitmex",
"bitstamp",
"coinbase",
"coincheck",
"luno",
]
GLASSNODE_SUPPORTED_ASSETS = [
"BTC",
"ETH",
"LTC",
"AAVE",
"ABT",
"AMPL",
"ANT",
"ARMOR",
"BADGER",
"BAL",
"BAND",
"BAT",
"BIX",
"BNT",
"BOND",
"BRD",
"BUSD",
"BZRX",
"CELR",
"CHSB",
"CND",
"COMP",
"CREAM",
"CRO",
"CRV",
"CVC",
"CVP",
"DAI",
"DDX",
"DENT",
"DGX",
"DHT",
"DMG",
"DODO",
"DOUGH",
"DRGN",
"ELF",
"ENG",
"ENJ",
"EURS",
"FET",
"FTT",
"FUN",
"GNO",
"GUSD",
"HEGIC",
"HOT",
"HPT",
"HT",
"HUSD",
"INDEX",
"KCS",
"LAMB",
"LBA",
"LDO",
"LEO",
"LINK",
"LOOM",
"LRC",
"MANA",
"MATIC",
"MCB",
"MCO",
"MFT",
"MIR",
"MKR",
"MLN",
"MTA",
"MTL",
"MX",
"NDX",
"NEXO",
"NFTX",
"NMR",
"Nsure",
"OCEAN",
"OKB",
"OMG",
"PAX",
"PAY",
"PERP",
"PICKLE",
"PNK",
"PNT",
"POLY",
"POWR",
"PPT",
"QASH",
"QKC",
"QNT",
"RDN",
"REN",
"REP",
"RLC",
"ROOK",
"RPL",
"RSR",
"SAI",
"SAN",
"SNT",
"SNX",
"STAKE",
"STORJ",
"sUSD",
"SUSHI",
"TEL",
"TOP",
"UBT",
"UMA",
"UNI",
"USDC",
"USDK",
"USDT",
"UTK",
"VERI",
"WaBi",
"WAX",
"WBTC",
"WETH",
"wNMX",
"WTC",
"YAM",
"YFI",
"ZRX",
]
INTERVALS_HASHRATE = ["24h", "1w", "1month"]
INTERVALS_ACTIVE_ADDRESSES = ["24h", "1w", "1month"]
INTERVALS_NON_ZERO_ADDRESSES = ["24h"]
INTERVALS_DISPLAY_EXCHANGE_NET_POSITION_CHANGE = ["24h"]
INTERVALS_EXCHANGE_BALANCES = ["24h"]
@log_start_end(log=logger)
def get_close_price(asset: str, interval: str, since: int, until: int) -> pd.DataFrame:
"""Returns the price of a cryptocurrency
[Source: https://glassnode.com]
Parameters
----------
asset : str
Crypto to check close price (BTC or ETH)
since : int
Initial date timestamp (e.g., 1_614_556_800)
until : int
        End date timestamp in Unix seconds (e.g., 1_641_227_783)
interval : str
Interval frequency (e.g., 24h)
Returns
-------
pd.DataFrame
price over time
"""
url = api_url + "market/price_usd_close"
parameters = {
"api_key": cfg.API_GLASSNODE_KEY,
"a": asset,
"i": interval,
"s": str(since),
"u": str(until),
}
r = requests.get(url, params=parameters)
df = pd.DataFrame()
if r.status_code == 200:
df = pd.DataFrame(json.loads(r.text))
if df.empty:
console.print(f"No data found for {asset} price.\n")
else:
df = df.set_index("t")
df.index = pd.to_datetime(df.index, unit="s")
elif r.status_code == 401:
console.print("[red]Invalid API Key[/red]\n")
else:
console.print(r.text)
return df
@log_start_end(log=logger)
def get_non_zero_addresses(
asset: str, interval: str, since: int, until: int
) -> pd.DataFrame:
"""Returns addresses with non-zero balance of a certain asset
[Source: https://glassnode.com]
Parameters
----------
asset : str
Asset to search (e.g., BTC)
since : int
Initial date timestamp (e.g., 1_577_836_800)
until : int
End date timestamp (e.g., 1_609_459_200)
interval : str
Interval frequency (e.g., 24h)
Returns
-------
pd.DataFrame
addresses with non-zero balances
"""
url = api_url + "addresses/non_zero_count"
parameters = {
"api_key": cfg.API_GLASSNODE_KEY,
"a": asset,
"i": interval,
"s": str(since),
"u": str(until),
}
r = requests.get(url, params=parameters)
df = pd.DataFrame()
if r.status_code == 200:
df = pd.DataFrame(json.loads(r.text))
if df.empty:
console.print(f"No data found for {asset}'s non-zero addresses.\n")
else:
df["t"] = | pd.to_datetime(df["t"], unit="s") | pandas.to_datetime |
import pandas as pd
import matplotlib.pyplot as plt
import statistics
from datetime import datetime
import copy
import json
import numpy as np
from os import listdir
from os.path import isfile, join
import unknown
import baseline
import known_approx as kapprox
import mary_optimal as mary
dirs = ['/localdisk1/DOT/flights/2018', '/localdisk1/DOT/flights/2019', '/localdisk1/DOT/flights/2020']
sourcesdir = '/localdisk1/DOT/csv/'
outputjsondir = '/localdisk1/DOT/json/'
outputdir = '/localdisk1/DOT'
alldata = '/localdisk1/DOT/allflights.csv'
# number of runs
nr = 15
# data set size
n = 5000
def merge_files():
fs = [join(d, f) for d in dirs for f in listdir(d) if isfile(join(d, f))]
#dfs = [pd.read_csv(f) for f in fs]
dfs = []
for f in fs:
dfs.append(pd.read_csv(f))
onedf = | pd.concat(dfs) | pandas.concat |
import pandas as pd
import numpy as np
d= pd.read_csv(snakemake.input[0], sep= '\t', header=0, compression= 'gzip')
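# A sketch of the intent, as read from the code below: drop alternate contigs
# (chromosome names containing '_'), split the comma-separated alt alleles,
# recode indels as 'I'/'D', and build canonical "chrom:pos:allele1:allele2" IDs
# with the two alleles in alphabetical order so variants match regardless of
# allele ordering.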
d= d.loc[~d['#chrom'].str.contains('_'), :]
d['a1']= d.alts.str.split(',').str[0]
d['a2']= d.alts.str.split(',').str[1]
d['#chrom']= d['#chrom'].str.replace('chr', '')
d['POS']= np.where(d.ref.str.len() < d.alts.str.len(), d.chromStart, d.chromEnd)
d['ref']= np.where(d.ref.str.len()< d.alts.str.len(), 'I', d.ref)
d['ref']= np.where(d.ref.str.len() > d.alts.str.len(), 'D', d.ref)
d['a1']= np.where(d.ref== 'I', 'D', d.a1)
d['a1']= np.where(d.ref== 'D', 'I', d.a1)
df= d.copy()
df= df.loc[df.a2!= '', :]
d.loc[d.ref > d.a1, ['ref', 'a1']] = d.loc[d.ref > d.a1, ['a1', 'ref']].values
d['ID']= d['#chrom'] + ':' + d['POS'].astype(int).astype(str) + ':' + d.ref + ':' + d.a1
df.loc[df.ref > df.a2, ['ref', 'a2']] = df.loc[df.ref > df.a2, ['a2', 'ref']].values
df['ID']= df['#chrom'] + ':' + df['POS'].astype(int).astype(str) + ':' + df.ref + ':' + df.a2
df= df[['ID', 'name']]
d= d[['ID', 'name']]
d= | pd.concat([d, df]) | pandas.concat |
from context import dero
import pandas as pd
from pandas.util.testing import assert_frame_equal
from pandas import Timestamp
from numpy import nan
import numpy
class DataFrameTest:
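    # Shared fixture data: the test classes below subclass DataFrameTest purely
    # to reuse these small hand-built frames (prices by PERMNO/byvar/Date plus
    # a few edge-case variants with NaNs, duplicates and weights).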
df = pd.DataFrame([
(10516, 'a', '1/1/2000', 1.01),
(10516, 'a', '1/2/2000', 1.02),
(10516, 'a', '1/3/2000', 1.03),
(10516, 'a', '1/4/2000', 1.04),
(10516, 'b', '1/1/2000', 1.05),
(10516, 'b', '1/2/2000', 1.06),
(10516, 'b', '1/3/2000', 1.07),
(10516, 'b', '1/4/2000', 1.08),
(10517, 'a', '1/1/2000', 1.09),
(10517, 'a', '1/2/2000', 1.10),
(10517, 'a', '1/3/2000', 1.11),
(10517, 'a', '1/4/2000', 1.12),
], columns = ['PERMNO','byvar','Date', 'RET'])
df_duplicate_row = pd.DataFrame([
(10516, 'a', '1/1/2000', 1.01),
(10516, 'a', '1/2/2000', 1.02),
(10516, 'a', '1/3/2000', 1.03),
(10516, 'a', '1/3/2000', 1.03), #this is a duplicated row
(10516, 'a', '1/4/2000', 1.04),
(10516, 'b', '1/1/2000', 1.05),
(10516, 'b', '1/2/2000', 1.06),
(10516, 'b', '1/3/2000', 1.07),
(10516, 'b', '1/4/2000', 1.08),
(10517, 'a', '1/1/2000', 1.09),
(10517, 'a', '1/2/2000', 1.10),
(10517, 'a', '1/3/2000', 1.11),
(10517, 'a', '1/4/2000', 1.12),
], columns = ['PERMNO','byvar','Date', 'RET'])
df_weight = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 0),
(10516, 'a', '1/2/2000', 1.02, 1),
(10516, 'a', '1/3/2000', 1.03, 1),
(10516, 'a', '1/4/2000', 1.04, 0),
(10516, 'b', '1/1/2000', 1.05, 1),
(10516, 'b', '1/2/2000', 1.06, 1),
(10516, 'b', '1/3/2000', 1.07, 1),
(10516, 'b', '1/4/2000', 1.08, 1),
(10517, 'a', '1/1/2000', 1.09, 0),
(10517, 'a', '1/2/2000', 1.1, 0),
(10517, 'a', '1/3/2000', 1.11, 0),
(10517, 'a', '1/4/2000', 1.12, 1),
], columns = ['PERMNO', 'byvar', 'Date', 'RET', 'weight'])
df_nan_byvar = pd.DataFrame(data = [
('a', 1),
(nan, 2),
('b', 3),
('b', 4),
], columns = ['byvar', 'val'])
df_nan_byvar_and_val = pd.DataFrame(data = [
('a', 1),
(nan, 2),
('b', nan),
('b', 4),
], columns = ['byvar', 'val'])
single_ticker_df = pd.DataFrame(data = [
('a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
], columns = ['byvar', 'Date', 'TICKER'])
df_datetime = df.copy()
df_datetime['Date'] = pd.to_datetime(df_datetime['Date'])
df_datetime_no_ret = df_datetime.copy()
df_datetime_no_ret.drop('RET', axis=1, inplace=True)
df_gvkey_str = pd.DataFrame([
('001076','3/1/1995'),
('001076','4/1/1995'),
('001722','1/1/2012'),
('001722','7/1/2012'),
('001722', nan),
(nan ,'1/1/2012')
], columns=['GVKEY','Date'])
df_gvkey_str['Date'] = pd.to_datetime(df_gvkey_str['Date'])
df_gvkey_num = df_gvkey_str.copy()
df_gvkey_num['GVKEY'] = df_gvkey_num['GVKEY'].astype('float64')
df_gvkey_str2 = pd.DataFrame([
('001076','2/1/1995'),
('001076','3/2/1995'),
('001722','11/1/2011'),
('001722','10/1/2011'),
('001722', nan),
(nan ,'1/1/2012')
], columns=['GVKEY','Date'])
df_gvkey_str2['Date'] = pd.to_datetime(df_gvkey_str2['Date'])
df_fill_data = pd.DataFrame(
data=[
(4, 'c', nan, 'a'),
(1, 'd', 3, 'a'),
(10, 'e', 100, 'a'),
(2, nan, 6, 'b'),
(5, 'f', 8, 'b'),
(11, 'g', 150, 'b'),
],
columns=['y', 'x1', 'x2', 'group']
)
class TestCumulate(DataFrameTest):
expect_between_1_3 = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 1.01),
(10516, 'a', '1/2/2000', 1.02, 1.02),
(10516, 'a', '1/3/2000', 1.03, 1.0506),
(10516, 'a', '1/4/2000', 1.04, 1.04),
(10516, 'b', '1/1/2000', 1.05, 1.05),
(10516, 'b', '1/2/2000', 1.06, 1.06),
(10516, 'b', '1/3/2000', 1.07, 1.1342),
(10516, 'b', '1/4/2000', 1.08, 1.08),
(10517, 'a', '1/1/2000', 1.09, 1.09),
(10517, 'a', '1/2/2000', 1.1, 1.1),
(10517, 'a', '1/3/2000', 1.11, 1.2210000000000003),
(10517, 'a', '1/4/2000', 1.12, 1.12),
], columns = ['PERMNO', 'byvar', 'Date', 'RET', 'cum_RET'])
expect_first = pd.DataFrame([
(10516, 'a', '1/1/2000', 1.01, 1.01),
(10516, 'a', '1/2/2000', 1.02, 1.02),
(10516, 'a', '1/3/2000', 1.03, 1.0506),
(10516, 'a', '1/4/2000', 1.04, 1.092624),
(10516, 'b', '1/1/2000', 1.05, 1.05),
(10516, 'b', '1/2/2000', 1.06, 1.06),
(10516, 'b', '1/3/2000', 1.07, 1.1342),
(10516, 'b', '1/4/2000', 1.08, 1.224936),
(10517, 'a', '1/1/2000', 1.09, 1.09),
(10517, 'a', '1/2/2000', 1.10, 1.10),
(10517, 'a', '1/3/2000', 1.11, 1.221),
(10517, 'a', '1/4/2000', 1.12, 1.36752),
], columns = ['PERMNO','byvar','Date', 'RET', 'cum_RET'])
def test_method_between_1_3(self):
cum_df = dero.pandas.cumulate(self.df, 'RET', 'between', periodvar='Date',
byvars=['PERMNO','byvar'], time=[1,3])
assert_frame_equal(self.expect_between_1_3, cum_df, check_dtype=False)
def test_method_between_m2_0(self):
cum_df = dero.pandas.cumulate(self.df, 'RET', 'between', periodvar='Date',
byvars=['PERMNO','byvar'], time=[-2,0])
#Actually same result as [1,3]
assert_frame_equal(self.expect_between_1_3, cum_df, check_dtype=False)
def test_shifted_index(self):
df = self.df.copy()
df.index = df.index + 10
cum_df = dero.pandas.cumulate(df, 'RET', 'between', periodvar='Date',
byvars=['PERMNO','byvar'], time=[-2,0])
assert_frame_equal(self.expect_between_1_3, cum_df, check_dtype=False)
def test_method_first(self):
cum_df = dero.pandas.cumulate(self.df, 'RET', 'first', periodvar='Date',
byvars=['PERMNO','byvar'])
assert_frame_equal(self.expect_first, cum_df, check_dtype=False)
def test_grossify(self):
df = self.df.copy() #don't overwrite original
df['RET'] -= 1 #ungrossify
expect_first_grossify = self.expect_first.copy()
expect_first_grossify['cum_RET'] -= 1
expect_first_grossify['RET'] -= 1
cum_df = dero.pandas.cumulate(df, 'RET', 'first', periodvar='Date',
byvars=['PERMNO','byvar'], grossify=True)
assert_frame_equal(expect_first_grossify, cum_df, check_dtype=False)
class TestGroupbyMerge(DataFrameTest):
def test_subset_max(self):
byvars = ['PERMNO','byvar']
out = dero.pandas.groupby_merge(self.df, byvars, 'max', subset='RET')
expect_df = pd.DataFrame(
[(10516, 'a', '1/1/2000', 1.01, 1.04),
(10516, 'a', '1/2/2000', 1.02, 1.04),
(10516, 'a', '1/3/2000', 1.03, 1.04),
(10516, 'a', '1/4/2000', 1.04, 1.04),
(10516, 'b', '1/1/2000', 1.05, 1.08),
(10516, 'b', '1/2/2000', 1.06, 1.08),
(10516, 'b', '1/3/2000', 1.07, 1.08),
(10516, 'b', '1/4/2000', 1.08, 1.08),
(10517, 'a', '1/1/2000', 1.09, 1.12),
(10517, 'a', '1/2/2000', 1.10, 1.12),
(10517, 'a', '1/3/2000', 1.11, 1.12),
(10517, 'a', '1/4/2000', 1.12, 1.12)],
columns = ['PERMNO','byvar','Date', 'RET', 'RET_max'])
assert_frame_equal(expect_df, out)
def test_subset_std(self):
byvars = ['PERMNO','byvar']
out = dero.pandas.groupby_merge(self.df, byvars, 'std', subset='RET')
expect_df = pd.DataFrame(
[(10516, 'a', '1/1/2000', 1.01, 0.012909944487358068),
(10516, 'a', '1/2/2000', 1.02, 0.012909944487358068),
(10516, 'a', '1/3/2000', 1.03, 0.012909944487358068),
(10516, 'a', '1/4/2000', 1.04, 0.012909944487358068),
(10516, 'b', '1/1/2000', 1.05, 0.012909944487358068),
(10516, 'b', '1/2/2000', 1.06, 0.012909944487358068),
(10516, 'b', '1/3/2000', 1.07, 0.012909944487358068),
(10516, 'b', '1/4/2000', 1.08, 0.012909944487358068),
(10517, 'a', '1/1/2000', 1.09, 0.012909944487358068),
(10517, 'a', '1/2/2000', 1.10, 0.012909944487358068),
(10517, 'a', '1/3/2000', 1.11, 0.012909944487358068),
(10517, 'a', '1/4/2000', 1.12, 0.012909944487358068)],
columns = ['PERMNO','byvar','Date', 'RET', 'RET_std'])
assert_frame_equal(expect_df, out)
def test_nan_byvar_transform(self):
expect_df = self.df_nan_byvar.copy()
expect_df['val_transform'] = expect_df['val']
out = dero.pandas.groupby_merge(self.df_nan_byvar, 'byvar', 'transform', (lambda x: x))
assert_frame_equal(expect_df, out)
def test_nan_byvar_and_nan_val_transform_numeric(self):
non_standard_index = self.df_nan_byvar_and_val.copy()
non_standard_index.index = [5,6,7,8]
expect_df = self.df_nan_byvar_and_val.copy()
expect_df['val_transform'] = expect_df['val'] + 1
expect_df.index = [5,6,7,8]
out = dero.pandas.groupby_merge(non_standard_index, 'byvar', 'transform', (lambda x: x + 1))
assert_frame_equal(expect_df, out)
def test_nan_byvar_and_nan_val_and_nonstandard_index_transform_numeric(self):
expect_df = self.df_nan_byvar_and_val.copy()
expect_df['val_transform'] = expect_df['val'] + 1
def test_nan_byvar_sum(self):
expect_df = pd.DataFrame(data = [
('a', 1, 1.0),
(nan, 2, nan),
('b', 3, 7.0),
('b', 4, 7.0),
], columns = ['byvar', 'val', 'val_sum'])
out = dero.pandas.groupby_merge(self.df_nan_byvar, 'byvar', 'sum')
assert_frame_equal(expect_df, out)
class TestLongToWide:
expect_df_with_colindex = pd.DataFrame(data = [
(10516, 'a', 1.01, 1.02, 1.03, 1.04),
(10516, 'b', 1.05, 1.06, 1.07, 1.08),
(10517, 'a', 1.09, 1.1, 1.11, 1.12),
], columns = ['PERMNO', 'byvar',
'RET1/1/2000', 'RET1/2/2000',
'RET1/3/2000', 'RET1/4/2000'])
expect_df_no_colindex = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 1.02, 1.03, 1.04),
(10516, 'a', '1/2/2000', 1.01, 1.02, 1.03, 1.04),
(10516, 'a', '1/3/2000', 1.01, 1.02, 1.03, 1.04),
(10516, 'a', '1/4/2000', 1.01, 1.02, 1.03, 1.04),
(10516, 'b', '1/1/2000', 1.05, 1.06, 1.07, 1.08),
(10516, 'b', '1/2/2000', 1.05, 1.06, 1.07, 1.08),
(10516, 'b', '1/3/2000', 1.05, 1.06, 1.07, 1.08),
(10516, 'b', '1/4/2000', 1.05, 1.06, 1.07, 1.08),
(10517, 'a', '1/1/2000', 1.09, 1.1, 1.11, 1.12),
(10517, 'a', '1/2/2000', 1.09, 1.1, 1.11, 1.12),
(10517, 'a', '1/3/2000', 1.09, 1.1, 1.11, 1.12),
(10517, 'a', '1/4/2000', 1.09, 1.1, 1.11, 1.12),
], columns = ['PERMNO', 'byvar', 'Date', 'RET0',
'RET1', 'RET2', 'RET3'])
input_data = DataFrameTest()
ltw_no_dup_colindex = dero.pandas.long_to_wide(input_data.df,
['PERMNO', 'byvar'], 'RET', colindex='Date')
ltw_dup_colindex = dero.pandas.long_to_wide(input_data.df_duplicate_row,
['PERMNO', 'byvar'], 'RET', colindex='Date')
ltw_no_dup_no_colindex = dero.pandas.long_to_wide(input_data.df,
['PERMNO', 'byvar'], 'RET')
ltw_dup_no_colindex = dero.pandas.long_to_wide(input_data.df_duplicate_row,
['PERMNO', 'byvar'], 'RET')
df_list = [ltw_no_dup_colindex, ltw_dup_colindex,
ltw_no_dup_no_colindex, ltw_dup_no_colindex]
def test_no_duplicates_with_colindex(self):
assert_frame_equal(self.expect_df_with_colindex, self.ltw_no_dup_colindex)
def test_duplicates_with_colindex(self):
assert_frame_equal(self.expect_df_with_colindex, self.ltw_dup_colindex)
def test_no_duplicates_no_colindex(self):
assert_frame_equal(self.expect_df_no_colindex, self.ltw_no_dup_no_colindex)
def test_duplicates_no_colindex(self):
assert_frame_equal(self.expect_df_no_colindex, self.ltw_dup_no_colindex)
def test_no_extra_vars(self):
for df in self.df_list:
assert ('__idx__','__key__') not in df.columns
class TestPortfolioAverages:
input_data = DataFrameTest()
expect_avgs_no_wt = pd.DataFrame(data = [
(1, 'a', 1.0250000000000001),
(1, 'b', 1.0550000000000002),
(2, 'a', 1.1050000000000002),
(2, 'b', 1.0750000000000002),
], columns = ['portfolio', 'byvar', 'RET'])
expect_avgs_wt = pd.DataFrame(data = [
(1, 'a', 1.0250000000000001, 1.025),
(1, 'b', 1.0550000000000002, 1.0550000000000002),
(2, 'a', 1.1050000000000002, 1.12),
(2, 'b', 1.0750000000000002, 1.0750000000000002),
], columns = ['portfolio', 'byvar', 'RET', 'RET_wavg'])
expect_ports = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 0, 1),
(10516, 'a', '1/2/2000', 1.02, 1, 1),
(10516, 'a', '1/3/2000', 1.03, 1, 1),
(10516, 'a', '1/4/2000', 1.04, 0, 1),
(10516, 'b', '1/1/2000', 1.05, 1, 1),
(10516, 'b', '1/2/2000', 1.06, 1, 1),
(10516, 'b', '1/3/2000', 1.07, 1, 2),
(10516, 'b', '1/4/2000', 1.08, 1, 2),
(10517, 'a', '1/1/2000', 1.09, 0, 2),
(10517, 'a', '1/2/2000', 1.1, 0, 2),
(10517, 'a', '1/3/2000', 1.11, 0, 2),
(10517, 'a', '1/4/2000', 1.12, 1, 2),
], columns = ['PERMNO', 'byvar', 'Date', 'RET', 'weight', 'portfolio'])
avgs, ports = dero.pandas.portfolio_averages(input_data.df_weight, 'RET', 'RET', ngroups=2,
byvars='byvar')
w_avgs, w_ports = dero.pandas.portfolio_averages(input_data.df_weight, 'RET', 'RET', ngroups=2,
byvars='byvar', wtvar='weight')
def test_simple_averages(self):
assert_frame_equal(self.expect_avgs_no_wt, self.avgs, check_dtype=False)
def test_weighted_averages(self):
assert_frame_equal(self.expect_avgs_wt, self.w_avgs, check_dtype=False)
def test_portfolio_construction(self):
print(self.ports)
assert_frame_equal(self.expect_ports, self.ports, check_dtype=False)
assert_frame_equal(self.expect_ports, self.w_ports, check_dtype=False)
class TestWinsorize(DataFrameTest):
def test_winsor_40_subset_byvars(self):
expect_df = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.022624),
(10516, 'a', '1/2/2000', 1.022624),
(10516, 'a', '1/3/2000', 1.02672),
(10516, 'a', '1/4/2000', 1.02672),
(10516, 'b', '1/1/2000', 1.062624),
(10516, 'b', '1/2/2000', 1.062624),
(10516, 'b', '1/3/2000', 1.06672),
(10516, 'b', '1/4/2000', 1.06672),
(10517, 'a', '1/1/2000', 1.102624),
(10517, 'a', '1/2/2000', 1.102624),
(10517, 'a', '1/3/2000', 1.10672),
(10517, 'a', '1/4/2000', 1.10672),
], columns = ['PERMNO', 'byvar', 'Date', 'RET'])
wins = dero.pandas.winsorize(self.df, .4, subset='RET', byvars=['PERMNO','byvar'])
assert_frame_equal(expect_df, wins, check_less_precise=True)
class TestRegBy(DataFrameTest):
def create_indf(self):
indf = self.df_weight.copy()
indf['key'] = indf['PERMNO'].astype(str) + '_' + indf['byvar']
return indf
def test_regby_nocons(self):
indf = self.create_indf()
expect_df = pd.DataFrame(data = [
(0.48774684748988806, '10516_a'),
(0.9388636664168903, '10516_b'),
(0.22929206076239614, '10517_a'),
], columns = ['coef_RET', 'key'])
rb = dero.pandas.reg_by(indf, 'weight', 'RET', 'key', cons=False)
print('Reg by: ', rb)
assert_frame_equal(expect_df, rb)
def test_regby_cons(self):
indf = self.create_indf()
expect_df = pd.DataFrame(data = [
(0.49999999999999645, 5.329070518200751e-15, '10516_a'),
(0.9999999999999893, 1.0658141036401503e-14, '10516_b'),
(-32.89999999999997, 29.999999999999982, '10517_a'),
], columns = ['const', 'coef_RET', 'key'])
rb = dero.pandas.reg_by(indf, 'weight', 'RET', 'key')
print('Reg by: ', rb)
assert_frame_equal(expect_df, rb)
def test_regby_cons_low_obs(self):
indf = self.create_indf().loc[:8,:] #makes it so that one byvar only has one obs
expect_df = pd.DataFrame(data = [
(0.49999999999999645, 5.329070518200751e-15, '10516_a'),
(0.9999999999999893, 1.0658141036401503e-14, '10516_b'),
(nan, nan, '10517_a'),
], columns = ['const', 'coef_RET', 'key'])
rb = dero.pandas.reg_by(indf, 'weight', 'RET', 'key')
print('Reg by: ', rb)
assert_frame_equal(expect_df, rb)
class TestExpandMonths(DataFrameTest):
def test_expand_months_tradedays(self):
expect_df = pd.DataFrame(data = [
(Timestamp('2000-01-03 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-04 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-05 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-06 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-07 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-10 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-11 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-12 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-13 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-14 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-18 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-19 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-20 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-21 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-24 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-25 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-26 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-27 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-28 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-31 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
], columns = ['Daily Date', 'byvar', 'Date', 'TICKER'])
em = dero.pandas.expand_months(self.single_ticker_df)
assert_frame_equal(expect_df.sort_index(axis=1), em.sort_index(axis=1))
def test_expand_months_calendardays(self):
expect_df = pd.DataFrame(data = [
(Timestamp('2000-01-01 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-02 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-03 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-04 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-05 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-06 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-07 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-08 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-09 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-10 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-11 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-12 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-13 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-14 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-15 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-16 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-17 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-18 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-19 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-20 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-21 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-22 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-23 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-24 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-25 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-26 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-27 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-28 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-29 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-30 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-31 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
], columns = ['Daily Date', 'byvar', 'Date', 'TICKER'])
em = dero.pandas.expand_months(self.single_ticker_df, trade_days=False)
assert_frame_equal(expect_df.sort_index(axis=1), em.sort_index(axis=1))
class TestPortfolio(DataFrameTest):
def test_portfolio_byvars(self):
expect_df = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 1),
(10516, 'a', '1/2/2000', 1.02, 1),
(10516, 'a', '1/3/2000', 1.03, 2),
(10516, 'a', '1/4/2000', 1.04, 2),
(10516, 'b', '1/1/2000', 1.05, 1),
(10516, 'b', '1/2/2000', 1.06, 1),
(10516, 'b', '1/3/2000', 1.07, 2),
(10516, 'b', '1/4/2000', 1.08, 2),
(10517, 'a', '1/1/2000', 1.09, 1),
(10517, 'a', '1/2/2000', 1.1, 1),
(10517, 'a', '1/3/2000', 1.11, 2),
(10517, 'a', '1/4/2000', 1.12, 2),
], columns = ['PERMNO', 'byvar', 'Date', 'RET', 'portfolio'])
p = dero.pandas.portfolio(self.df, 'RET', ngroups=2, byvars=['PERMNO','byvar'])
assert_frame_equal(expect_df, p, check_dtype=False)
def test_portfolio_with_nan_and_byvars(self):
expect_df = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', nan, 0),
(10516, 'a', '1/2/2000', 1.02, 1),
(10516, 'a', '1/3/2000', 1.03, 1), #changed from 2 to 1 when updated nan handling
(10516, 'a', '1/4/2000', 1.04, 2),
(10516, 'b', '1/1/2000', 1.05, 1),
(10516, 'b', '1/2/2000', 1.06, 1),
(10516, 'b', '1/3/2000', 1.07, 2),
(10516, 'b', '1/4/2000', 1.08, 2),
(10517, 'a', '1/1/2000', 1.09, 1),
(10517, 'a', '1/2/2000', 1.1, 1),
(10517, 'a', '1/3/2000', 1.11, 2),
(10517, 'a', '1/4/2000', 1.12, 2),
], columns = ['PERMNO', 'byvar', 'Date', 'RET', 'portfolio'])
indf = self.df.copy()
indf.loc[0, 'RET'] = nan
p = dero.pandas.portfolio(indf, 'RET', ngroups=2, byvars=['PERMNO','byvar'])
assert_frame_equal(expect_df, p, check_dtype=False)
class TestConvertSASDateToPandasDate:
df_sasdate = pd.DataFrame(data = [
('011508', 16114.0),
('011508', 16482.0),
('011508', 17178.0),
('011508', 17197.0),
('011508', 17212.0),
], columns = ['gvkey', 'datadate'])
df_sasdate_nan = pd.DataFrame(data = [
('011508', 16114.0),
('011508', 16482.0),
('011508', 17178.0),
('011508', 17197.0),
('011508', nan),
('011508', 17212.0),
], columns = ['gvkey', 'datadate'])
def test_convert(self):
expect_df = pd.DataFrame(data = [
(numpy.datetime64('2004-02-13T00:00:00.000000000'),),
(numpy.datetime64('2005-02-15T00:00:00.000000000'),),
(numpy.datetime64('2007-01-12T00:00:00.000000000'),),
(numpy.datetime64('2007-01-31T00:00:00.000000000'),),
(numpy.datetime64('2007-02-15T00:00:00.000000000'),),
], columns = [0])
converted = pd.DataFrame(dero.pandas.convert_sas_date_to_pandas_date(self.df_sasdate['datadate']))
assert_frame_equal(expect_df, converted)
def test_convert_nan(self):
expect_df = pd.DataFrame(data = [
(numpy.datetime64('2004-02-13T00:00:00.000000000'),),
(numpy.datetime64('2005-02-15T00:00:00.000000000'),),
(numpy.datetime64('2007-01-12T00:00:00.000000000'),),
(numpy.datetime64('2007-01-31T00:00:00.000000000'),),
(numpy.datetime64('NaT'),),
(numpy.datetime64('2007-02-15T00:00:00.000000000'),),
], columns = [0])
converted = pd.DataFrame(dero.pandas.convert_sas_date_to_pandas_date(self.df_sasdate_nan['datadate']))
assert_frame_equal(expect_df, converted)
class TestMapWindows(DataFrameTest):
times = [
[-4, -2, 0],
[-3, 1, 2],
[4, 5, 6],
[0, 1, 2],
[-1, 0, 1]
]
df_period_str = pd.DataFrame([
(10516, '1/1/2000', 1.01),
(10516, '1/2/2000', 1.02),
(10516, '1/3/2000', 1.03),
(10516, '1/4/2000', 1.04),
(10516, '1/5/2000', 1.05),
(10516, '1/6/2000', 1.06),
(10516, '1/7/2000', 1.07),
(10516, '1/8/2000', 1.08),
(10517, '1/1/2000', 1.09),
(10517, '1/2/2000', 1.10),
(10517, '1/3/2000', 1.11),
(10517, '1/4/2000', 1.12),
(10517, '1/5/2000', 1.05),
(10517, '1/6/2000', 1.06),
(10517, '1/7/2000', 1.07),
(10517, '1/8/2000', 1.08),
], columns = ['PERMNO','Date', 'RET'])
df_period = df_period_str.copy()
df_period['Date'] = pd.to_datetime(df_period['Date'])
expect_dfs = [
pd.DataFrame(data = [
(10516, Timestamp('2000-01-01 00:00:00'), 1.01, 0),
(10516, Timestamp('2000-01-02 00:00:00'), 1.02, 1),
(10516, Timestamp('2000-01-03 00:00:00'), 1.03, 1),
(10516, Timestamp('2000-01-04 00:00:00'), 1.04, 2),
(10516, Timestamp('2000-01-05 00:00:00'), 1.05, 2),
(10516, Timestamp('2000-01-06 00:00:00'), 1.06, 3),
(10516, Timestamp('2000-01-07 00:00:00'), 1.07, 3),
(10516, Timestamp('2000-01-08 00:00:00'), 1.08, 3),
(10517, Timestamp('2000-01-01 00:00:00'), 1.09, 0),
(10517, Timestamp('2000-01-02 00:00:00'), 1.1, 1),
(10517, Timestamp('2000-01-03 00:00:00'), 1.11, 1),
(10517, Timestamp('2000-01-04 00:00:00'), 1.12, 2),
(10517, Timestamp('2000-01-05 00:00:00'), 1.05, 2),
(10517, Timestamp('2000-01-06 00:00:00'), 1.06, 3),
(10517, Timestamp('2000-01-07 00:00:00'), 1.07, 3),
(10517, Timestamp('2000-01-08 00:00:00'), 1.08, 3),
], columns = ['PERMNO', 'Date', 'RET', '__map_window__']),
pd.DataFrame(data = [
(10516, Timestamp('2000-01-01 00:00:00'), 1.01, 0),
(10516, Timestamp('2000-01-02 00:00:00'), 1.02, 1),
(10516, Timestamp('2000-01-03 00:00:00'), 1.03, 1),
(10516, Timestamp('2000-01-04 00:00:00'), 1.04, 1),
(10516, Timestamp('2000-01-05 00:00:00'), 1.05, 1),
(10516, Timestamp('2000-01-06 00:00:00'), 1.06, 2),
(10516, Timestamp('2000-01-07 00:00:00'), 1.07, 3),
(10516, Timestamp('2000-01-08 00:00:00'), 1.08, 3),
(10517, Timestamp('2000-01-01 00:00:00'), 1.09, 0),
(10517, Timestamp('2000-01-02 00:00:00'), 1.1, 1),
(10517, Timestamp('2000-01-03 00:00:00'), 1.11, 1),
(10517, Timestamp('2000-01-04 00:00:00'), 1.12, 1),
(10517, Timestamp('2000-01-05 00:00:00'), 1.05, 1),
(10517, Timestamp('2000-01-06 00:00:00'), 1.06, 2),
(10517, Timestamp('2000-01-07 00:00:00'), 1.07, 3),
(10517, Timestamp('2000-01-08 00:00:00'), 1.08, 3),
], columns = ['PERMNO', 'Date', 'RET', '__map_window__']),
pd.DataFrame(data = [
(10516, Timestamp('2000-01-01 00:00:00'), 1.01, 0),
(10516, Timestamp('2000-01-02 00:00:00'), 1.02, 1),
(10516, Timestamp('2000-01-03 00:00:00'), 1.03, 2),
(10516, Timestamp('2000-01-04 00:00:00'), 1.04, 3),
(10516, Timestamp('2000-01-05 00:00:00'), 1.05, 3),
(10516, Timestamp('2000-01-06 00:00:00'), 1.06, 3),
(10516, Timestamp('2000-01-07 00:00:00'), 1.07, 3),
(10516, Timestamp('2000-01-08 00:00:00'), 1.08, 3),
(10517, Timestamp('2000-01-01 00:00:00'), 1.09, 0),
(10517, Timestamp('2000-01-02 00:00:00'), 1.1, 1),
(10517, Timestamp('2000-01-03 00:00:00'), 1.11, 2),
(10517, Timestamp('2000-01-04 00:00:00'), 1.12, 3),
(10517, Timestamp('2000-01-05 00:00:00'), 1.05, 3),
(10517, Timestamp('2000-01-06 00:00:00'), 1.06, 3),
(10517, Timestamp('2000-01-07 00:00:00'), 1.07, 3),
(10517, Timestamp('2000-01-08 00:00:00'), 1.08, 3),
], columns = ['PERMNO', 'Date', 'RET', '__map_window__']),
pd.DataFrame(data = [
(10516, Timestamp('2000-01-01 00:00:00'), 1.01, 0),
(10516, Timestamp('2000-01-02 00:00:00'), 1.02, 1),
(10516, Timestamp('2000-01-03 00:00:00'), 1.03, 2),
(10516, Timestamp('2000-01-04 00:00:00'), 1.04, 3),
(10516, Timestamp('2000-01-05 00:00:00'), 1.05, 3),
(10516, Timestamp('2000-01-06 00:00:00'), 1.06, 3),
(10516, Timestamp('2000-01-07 00:00:00'), 1.07, 3),
(10516, Timestamp('2000-01-08 00:00:00'), 1.08, 3),
(10517, Timestamp('2000-01-01 00:00:00'), 1.09, 0),
(10517, Timestamp('2000-01-02 00:00:00'), 1.1, 1),
(10517, Timestamp('2000-01-03 00:00:00'), 1.11, 2),
(10517, Timestamp('2000-01-04 00:00:00'), 1.12, 3),
(10517, Timestamp('2000-01-05 00:00:00'), 1.05, 3),
(10517, Timestamp('2000-01-06 00:00:00'), 1.06, 3),
(10517, Timestamp('2000-01-07 00:00:00'), 1.07, 3),
(10517, Timestamp('2000-01-08 00:00:00'), 1.08, 3),
], columns = ['PERMNO', 'Date', 'RET', '__map_window__']),
pd.DataFrame(data = [
(10516, Timestamp('2000-01-01 00:00:00'), 1.01, 0),
(10516, Timestamp('2000-01-02 00:00:00'), 1.02, 1),
(10516, Timestamp('2000-01-03 00:00:00'), 1.03, 2),
(10516, Timestamp('2000-01-04 00:00:00'), 1.04, 3),
(10516, Timestamp('2000-01-05 00:00:00'), 1.05, 3),
(10516, Timestamp('2000-01-06 00:00:00'), 1.06, 3),
(10516, Timestamp('2000-01-07 00:00:00'), 1.07, 3),
(10516, Timestamp('2000-01-08 00:00:00'), 1.08, 3),
(10517, Timestamp('2000-01-01 00:00:00'), 1.09, 0),
(10517, Timestamp('2000-01-02 00:00:00'), 1.1, 1),
(10517, Timestamp('2000-01-03 00:00:00'), 1.11, 2),
(10517, Timestamp('2000-01-04 00:00:00'), 1.12, 3),
(10517, Timestamp('2000-01-05 00:00:00'), 1.05, 3),
(10517, | Timestamp('2000-01-06 00:00:00') | pandas.Timestamp |
"""Python Script Template."""
import os
import pandas as pd
H_PARAMS = "hparams.json"
STATISTICS = "statistics.json"
def get_name(h_params):
"""Get experiment name from hyper parameter json file."""
protagonist_name = h_params.protagonist_name[0]
if protagonist_name in ["MVE", "Dyna", "STEVE"]:
protagonist_name += f"-{h_params.base_agent_name[0]}"
if protagonist_name in ["MVE", "Dyna", "STEVE", "BPTT"]:
protagonist_name += f"-{h_params.num_steps[0]}"
label = protagonist_name
if h_params.hallucinate[0]:
hallucination = True
if h_params.strong_antagonist[0]:
label, strong = "H-" + protagonist_name + "-strong", True
else:
label, strong = "H-" + protagonist_name + "-weak", False
else:
hallucination, strong = False, False
wrapper = h_params.adversarial_wrapper[0]
alpha = h_params.alpha[0]
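    # Naming scheme, as read from the branches above: model-based protagonists
    # get their base agent and rollout length appended, and hallucination runs
    # are prefixed with "H-" plus a strong/weak antagonist suffix.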
return dict(
protagonist_name=protagonist_name,
wrapper=wrapper,
alpha=alpha,
label=label,
hallucination=hallucination,
strong=strong,
)
def get_player_data_frame(run_dir, player="Protagonist"):
"""Get Player dataframe."""
agent_listdir = os.listdir(run_dir)
agents = [*filter(lambda x: player in x, agent_listdir)]
if len(agents) == 0:
return pd.DataFrame()
name = agents[0]
player_dir = f"{run_dir}/{name}"
player_dir = player_dir + "/" + os.listdir(player_dir)[0]
if len(os.listdir(player_dir)) == 0:
return pd.DataFrame()
return pd.read_json(f"{player_dir}/statistics.json")
def extend_data_frame(df, name_dict):
"""Extend data frame with a name dictionary."""
df["counter"] = range(len(df))
for key, value in name_dict.items():
df[key] = value
def get_all_data_frames(base_dir="runs/RARLAgent/"):
"""Get experiment data frames."""
if base_dir[-1] != "/":
base_dir = base_dir + "/"
joint_df = | pd.DataFrame() | pandas.DataFrame |
import argparse
import sys
import pandas as pd
def process_command_line():
"""
Parse command line arguments
`argv` is a list of arguments, or `None` for ``sys.argv[1:]``.
Return a Namespace representing the argument list.
"""
# Create the parser
parser = argparse.ArgumentParser(prog='mm_merge_predict_simulated',
description='merge simulation scenario output with mm predictions')
# Add arguments
parser.add_argument(
"experiment", type=str,
help="String used in output filenames"
)
parser.add_argument(
"perf_curve_pred_filename", type=str,
help="Path to csv file which contains predictions"
)
parser.add_argument(
"y_data_path", type=str,
help="Path to directory containing y data files (which are created from sim output)"
)
parser.add_argument(
"output_filename", type=str,
help="Path to merged output csv file"
)
# Do the parsing and return the populated namespace with the input arg values
args = parser.parse_args()
return args
def main(argv=None):
# Parse command line arguments
mm_args = process_command_line()
exp = mm_args.experiment
perf_curve_pred_filename = mm_args.perf_curve_pred_filename
y_data_path = mm_args.y_data_path
output_filename = mm_args.output_filename
predictions_df = | pd.read_csv(perf_curve_pred_filename) | pandas.read_csv |
import numpy as np
import pytest
from pandas import Categorical, Series
import pandas._testing as tm
@pytest.mark.parametrize(
"keep, expected",
[
("first", Series([False, False, False, False, True, True, False])),
("last", Series([False, True, True, False, False, False, False])),
(False, Series([False, True, True, False, True, True, False])),
],
)
def test_drop_duplicates(any_numpy_dtype, keep, expected):
tc = Series([1, 0, 3, 5, 3, 0, 4], dtype=np.dtype(any_numpy_dtype))
if tc.dtype == "bool":
pytest.skip("tested separately in test_drop_duplicates_bool")
tm.assert_series_equal(tc.duplicated(keep=keep), expected)
tm.assert_series_equal(tc.drop_duplicates(keep=keep), tc[~expected])
sc = tc.copy()
return_value = sc.drop_duplicates(keep=keep, inplace=True)
assert return_value is None
tm.assert_series_equal(sc, tc[~expected])
@pytest.mark.parametrize(
"keep, expected",
[
("first", Series([False, False, True, True])),
("last", Series([True, True, False, False])),
(False, Series([True, True, True, True])),
],
)
def test_drop_duplicates_bool(keep, expected):
tc = Series([True, False, True, False])
tm.assert_series_equal(tc.duplicated(keep=keep), expected)
tm.assert_series_equal(tc.drop_duplicates(keep=keep), tc[~expected])
sc = tc.copy()
return_value = sc.drop_duplicates(keep=keep, inplace=True)
tm.assert_series_equal(sc, tc[~expected])
assert return_value is None
@pytest.mark.parametrize("values", [[], list(range(5))])
def test_drop_duplicates_no_duplicates(any_numpy_dtype, keep, values):
tc = Series(values, dtype=np.dtype(any_numpy_dtype))
expected = Series([False] * len(tc), dtype="bool")
if tc.dtype == "bool":
# 0 -> False and 1-> True
# any other value would be duplicated
tc = tc[:2]
expected = expected[:2]
tm.assert_series_equal(tc.duplicated(keep=keep), expected)
result_dropped = tc.drop_duplicates(keep=keep)
tm.assert_series_equal(result_dropped, tc)
# validate shallow copy
assert result_dropped is not tc
class TestSeriesDropDuplicates:
@pytest.fixture(
params=["int_", "uint", "float_", "unicode_", "timedelta64[h]", "datetime64[D]"]
)
def dtype(self, request):
return request.param
@pytest.fixture
def cat_series1(self, dtype, ordered):
# Test case 1
cat_array = np.array([1, 2, 3, 4, 5], dtype=np.dtype(dtype))
input1 = np.array([1, 2, 3, 3], dtype=np.dtype(dtype))
cat = | Categorical(input1, categories=cat_array, ordered=ordered) | pandas.Categorical |
from datetime import datetime
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas.core.dtypes.base import _registry as ea_registry
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_interval_dtype,
is_object_dtype,
)
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
DatetimeTZDtype,
IntervalDtype,
PeriodDtype,
)
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
Interval,
IntervalIndex,
MultiIndex,
NaT,
Period,
PeriodIndex,
Series,
Timestamp,
cut,
date_range,
notna,
period_range,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
from pandas.tseries.offsets import BDay
class TestDataFrameSetItem:
@pytest.mark.parametrize("dtype", ["int32", "int64", "float32", "float64"])
def test_setitem_dtype(self, dtype, float_frame):
arr = np.random.randn(len(float_frame))
float_frame[dtype] = np.array(arr, dtype=dtype)
assert float_frame[dtype].dtype.name == dtype
def test_setitem_list_not_dataframe(self, float_frame):
data = np.random.randn(len(float_frame), 2)
float_frame[["A", "B"]] = data
tm.assert_almost_equal(float_frame[["A", "B"]].values, data)
def test_setitem_error_msmgs(self):
# GH 7432
df = DataFrame(
{"bar": [1, 2, 3], "baz": ["d", "e", "f"]},
index=Index(["a", "b", "c"], name="foo"),
)
ser = Series(
["g", "h", "i", "j"],
index=Index(["a", "b", "c", "a"], name="foo"),
name="fiz",
)
msg = "cannot reindex from a duplicate axis"
with pytest.raises(ValueError, match=msg):
df["newcol"] = ser
# GH 4107, more descriptive error message
df = DataFrame(np.random.randint(0, 2, (4, 4)), columns=["a", "b", "c", "d"])
msg = "incompatible index of inserted column with frame index"
with pytest.raises(TypeError, match=msg):
df["gr"] = df.groupby(["b", "c"]).count()
def test_setitem_benchmark(self):
# from the vb_suite/frame_methods/frame_insert_columns
N = 10
K = 5
df = DataFrame(index=range(N))
new_col = np.random.randn(N)
for i in range(K):
df[i] = new_col
expected = DataFrame(np.repeat(new_col, K).reshape(N, K), index=range(N))
tm.assert_frame_equal(df, expected)
def test_setitem_different_dtype(self):
df = DataFrame(
np.random.randn(5, 3), index=np.arange(5), columns=["c", "b", "a"]
)
df.insert(0, "foo", df["a"])
df.insert(2, "bar", df["c"])
# diff dtype
# new item
df["x"] = df["a"].astype("float32")
result = df.dtypes
expected = Series(
[np.dtype("float64")] * 5 + [np.dtype("float32")],
index=["foo", "c", "bar", "b", "a", "x"],
)
tm.assert_series_equal(result, expected)
# replacing current (in different block)
df["a"] = df["a"].astype("float32")
result = df.dtypes
expected = Series(
[np.dtype("float64")] * 4 + [np.dtype("float32")] * 2,
index=["foo", "c", "bar", "b", "a", "x"],
)
tm.assert_series_equal(result, expected)
df["y"] = df["a"].astype("int32")
result = df.dtypes
expected = Series(
[np.dtype("float64")] * 4 + [np.dtype("float32")] * 2 + [np.dtype("int32")],
index=["foo", "c", "bar", "b", "a", "x", "y"],
)
tm.assert_series_equal(result, expected)
def test_setitem_empty_columns(self):
# GH 13522
df = DataFrame(index=["A", "B", "C"])
df["X"] = df.index
df["X"] = ["x", "y", "z"]
exp = DataFrame(data={"X": ["x", "y", "z"]}, index=["A", "B", "C"])
tm.assert_frame_equal(df, exp)
def test_setitem_dt64_index_empty_columns(self):
rng = date_range("1/1/2000 00:00:00", "1/1/2000 1:59:50", freq="10s")
df = DataFrame(index=np.arange(len(rng)))
df["A"] = rng
assert df["A"].dtype == np.dtype("M8[ns]")
def test_setitem_timestamp_empty_columns(self):
# GH#19843
df = DataFrame(index=range(3))
df["now"] = Timestamp("20130101", tz="UTC")
expected = DataFrame(
[[Timestamp("20130101", tz="UTC")]] * 3, index=[0, 1, 2], columns=["now"]
)
tm.assert_frame_equal(df, expected)
def test_setitem_wrong_length_categorical_dtype_raises(self):
# GH#29523
cat = Categorical.from_codes([0, 1, 1, 0, 1, 2], ["a", "b", "c"])
df = DataFrame(range(10), columns=["bar"])
msg = (
rf"Length of values \({len(cat)}\) "
rf"does not match length of index \({len(df)}\)"
)
with pytest.raises(ValueError, match=msg):
df["foo"] = cat
def test_setitem_with_sparse_value(self):
# GH#8131
df = DataFrame({"c_1": ["a", "b", "c"], "n_1": [1.0, 2.0, 3.0]})
sp_array = SparseArray([0, 0, 1])
df["new_column"] = sp_array
expected = Series(sp_array, name="new_column")
tm.assert_series_equal(df["new_column"], expected)
def test_setitem_with_unaligned_sparse_value(self):
df = DataFrame({"c_1": ["a", "b", "c"], "n_1": [1.0, 2.0, 3.0]})
sp_series = Series(SparseArray([0, 0, 1]), index=[2, 1, 0])
df["new_column"] = sp_series
expected = Series(SparseArray([1, 0, 0]), name="new_column")
tm.assert_series_equal(df["new_column"], expected)
def test_setitem_dict_preserves_dtypes(self):
# https://github.com/pandas-dev/pandas/issues/34573
expected = DataFrame(
{
"a": Series([0, 1, 2], dtype="int64"),
"b": Series([1, 2, 3], dtype=float),
"c": Series([1, 2, 3], dtype=float),
}
)
df = DataFrame(
{
"a": Series([], dtype="int64"),
"b": Series([], dtype=float),
"c": Series([], dtype=float),
}
)
for idx, b in enumerate([1, 2, 3]):
df.loc[df.shape[0]] = {"a": int(idx), "b": float(b), "c": float(b)}
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize(
"obj,dtype",
[
(Period("2020-01"), PeriodDtype("M")),
(Interval(left=0, right=5), IntervalDtype("int64", "right")),
(
Timestamp("2011-01-01", tz="US/Eastern"),
DatetimeTZDtype(tz="US/Eastern"),
),
],
)
def test_setitem_extension_types(self, obj, dtype):
# GH: 34832
expected = DataFrame({"idx": [1, 2, 3], "obj": Series([obj] * 3, dtype=dtype)})
df = DataFrame({"idx": [1, 2, 3]})
df["obj"] = obj
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize(
"ea_name",
[
dtype.name
for dtype in ea_registry.dtypes
# property would require instantiation
if not isinstance(dtype.name, property)
]
# mypy doesn't allow adding lists of different types
# https://github.com/python/mypy/issues/5492
+ ["datetime64[ns, UTC]", "period[D]"], # type: ignore[list-item]
)
def test_setitem_with_ea_name(self, ea_name):
# GH 38386
result = DataFrame([0])
result[ea_name] = [1]
expected = DataFrame({0: [0], ea_name: [1]})
tm.assert_frame_equal(result, expected)
def test_setitem_dt64_ndarray_with_NaT_and_diff_time_units(self):
# GH#7492
data_ns = np.array([1, "nat"], dtype="datetime64[ns]")
result = Series(data_ns).to_frame()
result["new"] = data_ns
expected = DataFrame({0: [1, None], "new": [1, None]}, dtype="datetime64[ns]")
tm.assert_frame_equal(result, expected)
# OutOfBoundsDatetime error shouldn't occur
data_s = np.array([1, "nat"], dtype="datetime64[s]")
result["new"] = data_s
expected = DataFrame({0: [1, None], "new": [1e9, None]}, dtype="datetime64[ns]")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("unit", ["h", "m", "s", "ms", "D", "M", "Y"])
def test_frame_setitem_datetime64_col_other_units(self, unit):
# Check that non-nano dt64 values get cast to dt64 on setitem
# into a not-yet-existing column
n = 100
dtype = np.dtype(f"M8[{unit}]")
vals = np.arange(n, dtype=np.int64).view(dtype)
ex_vals = vals.astype("datetime64[ns]")
df = DataFrame({"ints": np.arange(n)}, index=np.arange(n))
df[unit] = vals
assert df[unit].dtype == np.dtype("M8[ns]")
assert (df[unit].values == ex_vals).all()
@pytest.mark.parametrize("unit", ["h", "m", "s", "ms", "D", "M", "Y"])
def test_frame_setitem_existing_datetime64_col_other_units(self, unit):
# Check that non-nano dt64 values get cast to dt64 on setitem
# into an already-existing dt64 column
n = 100
dtype = np.dtype(f"M8[{unit}]")
vals = np.arange(n, dtype=np.int64).view(dtype)
ex_vals = vals.astype("datetime64[ns]")
df = DataFrame({"ints": np.arange(n)}, index=np.arange(n))
df["dates"] = np.arange(n, dtype=np.int64).view("M8[ns]")
# We overwrite existing dt64 column with new, non-nano dt64 vals
df["dates"] = vals
assert (df["dates"].values == ex_vals).all()
def test_setitem_dt64tz(self, timezone_frame):
df = timezone_frame
idx = df["B"].rename("foo")
# setitem
df["C"] = idx
tm.assert_series_equal(df["C"], Series(idx, name="C"))
df["D"] = "foo"
df["D"] = idx
tm.assert_series_equal(df["D"], Series(idx, name="D"))
del df["D"]
# assert that A & C are not sharing the same base (e.g. they
# are copies)
v1 = df._mgr.arrays[1]
v2 = df._mgr.arrays[2]
tm.assert_extension_array_equal(v1, v2)
v1base = v1._data.base
v2base = v2._data.base
assert v1base is None or (id(v1base) != id(v2base))
# with nan
df2 = df.copy()
df2.iloc[1, 1] = NaT
df2.iloc[1, 2] = NaT
result = df2["B"]
tm.assert_series_equal(notna(result), Series([True, False, True], name="B"))
tm.assert_series_equal(df2.dtypes, df.dtypes)
def test_setitem_periodindex(self):
rng = period_range("1/1/2000", periods=5, name="index")
df = DataFrame(np.random.randn(5, 3), index=rng)
df["Index"] = rng
rs = Index(df["Index"])
tm.assert_index_equal(rs, rng, check_names=False)
assert rs.name == "Index"
assert rng.name == "index"
rs = df.reset_index().set_index("index")
assert isinstance(rs.index, PeriodIndex)
| tm.assert_index_equal(rs.index, rng) | pandas._testing.assert_index_equal |
"""Assay information class"""
import os
import yaml
import pandas as pd
from .example_filetype_format import FileTypeFormat
from . import process_functions
class Assayinfo(FileTypeFormat):
"""Assay information file type"""
_fileType = "assayinfo"
_process_kwargs = ["newPath", "databaseSynId"]
def _validateFilename(self, filepath_list):
"""Validate assay information filename"""
assert os.path.basename(filepath_list[0]) == "assay_information.yaml"
def process_steps(self, assay_info_df, newPath, databaseSynId):
"""
Process bed input and update bed database
Args:
assay_info_df: Assay information dataframe
newPath: Path to processed assay information
databaseSynId: assay information database synapse id
Returns:
path to assay information dataframe
"""
# Must pass in a list
process_assay_info_df = self._process(assay_info_df)
process_functions.updateData(self.syn, databaseSynId,
process_assay_info_df, self.center,
filterByColumn="CENTER",
toDelete=True)
process_assay_info_df.to_csv(newPath, sep="\t", index=False)
return newPath
def _process(self, df):
"""
Process assay_information.yaml. Standardizes SEQ_ASSAY_ID,
default 10 for gene_padding, and fills in variant_classifications
Args:
df: Assay information dataframe
Returns:
dataframe: Processed dataframe
"""
seq_assay_ids = [assay.upper().replace('_', '-')
for assay in df['SEQ_ASSAY_ID']]
df['SEQ_ASSAY_ID'] = seq_assay_ids
df['SEQ_PIPELINE_ID'] = [assay.upper().replace('_', '-')
for assay in df['SEQ_PIPELINE_ID']]
if process_functions.checkColExist(df, "gene_padding"):
df['gene_padding'] = df['gene_padding'].fillna(10)
df['gene_padding'] = df['gene_padding'].astype(int)
else:
df['gene_padding'] = 10
if not process_functions.checkColExist(df, "variant_classifications"):
df['variant_classifications'] = float('nan')
df['CENTER'] = self.center
return df
def _get_dataframe(self, filepath_list):
"""Take in yaml file, returns dataframe"""
filepath = filepath_list[0]
try:
with open(filepath, 'r') as yamlfile:
# https://github.com/yaml/pyyaml/wiki/PyYAML-yaml.load(input)-Deprecation
# Must add this because yaml load deprecation
assay_info_dict = yaml.load(yamlfile, Loader=yaml.FullLoader)
except Exception:
raise ValueError(
"assay_information.yaml: Can't read in your file. "
"Please make sure the file is a correctly formatted yaml")
# assay_info_df = pd.DataFrame(panel_info_dict)
# assay_info_df = assay_info_df.transpose()
# assay_info_df['SEQ_ASSAY_ID'] = assay_info_df.index
# assay_info_df.reset_index(drop=True, inplace=True)
assay_infodf = pd.DataFrame(assay_info_dict)
assay_info_transposeddf = assay_infodf.transpose()
all_panel_info = pd.DataFrame()
for assay in assay_info_dict:
assay_specific_info = assay_info_dict[assay]['assay_specific_info']
assay_specific_infodf = pd.DataFrame(assay_specific_info)
seq_assay_id_infodf = assay_info_transposeddf.loc[[assay]]
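            # Replicate the single panel-level row so it lines up row-for-row with
            # the assay-specific entries before the column-wise concat below.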
to_appenddf = [seq_assay_id_infodf]*(len(assay_specific_info) - 1)
if to_appenddf:
seq_assay_id_infodf = seq_assay_id_infodf.append(to_appenddf)
seq_assay_id_infodf.reset_index(drop=True, inplace=True)
assay_finaldf = pd.concat(
[assay_specific_infodf, seq_assay_id_infodf], axis=1)
del assay_finaldf['assay_specific_info']
columns_containing_lists = ['variant_classifications',
'alteration_types',
'preservation_technique', 'coverage']
for col in columns_containing_lists:
if assay_finaldf.get(col) is not None:
assay_finaldf[col] = [";".join(row)
for row in assay_finaldf[col]]
assay_finaldf['SEQ_PIPELINE_ID'] = assay
all_panel_info = all_panel_info.append(assay_finaldf)
return all_panel_info
def _validate(self, assay_info_df):
"""
Validates the values of assay information file
Args:
assay_info_df: assay information dataframe
Returns:
tuple: error and warning
"""
total_error = ""
warning = ""
if process_functions.checkColExist(assay_info_df, "SEQ_ASSAY_ID"):
all_seq_assays = assay_info_df.SEQ_ASSAY_ID.unique()
if not all([assay.startswith(self.center)
for assay in all_seq_assays]):
total_error += \
("Assay_information.yaml: Please make sure your all your "
"SEQ_ASSAY_IDs start with your center abbreviation.\n")
else:
total_error += \
"Assay_information.yaml: Must have SEQ_ASSAY_ID column.\n"
read_group_dict = process_functions.get_gdc_data_dictionary(
"read_group")
read_group_headers = read_group_dict['properties']
warn, error = process_functions.check_col_and_values(
assay_info_df,
'is_paired_end',
[True, False],
filename="Assay_information.yaml",
required=True)
warning += warn
total_error += error
warn, error = process_functions.check_col_and_values(
assay_info_df, 'library_selection',
read_group_headers['library_selection']['enum'],
filename="Assay_information.yaml",
required=True)
warning += warn
total_error += error
warn, error = process_functions.check_col_and_values(
assay_info_df,
'library_strategy',
read_group_headers['library_strategy']['enum'],
filename="Assay_information.yaml",
required=True)
warning += warn
total_error += error
warn, error = process_functions.check_col_and_values(
assay_info_df,
'platform',
read_group_headers['platform']['enum'],
filename="Assay_information.yaml",
required=True)
warning += warn
total_error += error
instrument_model = read_group_headers['instrument_model']['enum']
instrument_model.append(None)
warn, error = process_functions.check_col_and_values(
assay_info_df,
'instrument_model',
instrument_model,
filename="Assay_information.yaml",
required=True)
warning += warn
total_error += error
# target_capture_kit = read_group_headers['target_capture_kit']['enum']
# warn, error = process_functions.check_col_and_values(
# assay_info_df,
# 'target_capture_kit',
# target_capture_kit,
# filename="Assay_information.yaml",
# required=True)
# warning += warn
# total_error += error
if not process_functions.checkColExist(assay_info_df,
"target_capture_kit"):
total_error += ("Assay_information.yaml: "
"Must have target_capture_kit column.\n")
variant_classes = ['Splice_Site', 'Nonsense_Mutation',
'Frame_Shift_Del', 'Frame_Shift_Ins',
'Nonstop_Mutation', 'Translation_Start_Site',
'In_Frame_Ins', 'In_Frame_Del',
'Missense_Mutation', 'Intron', 'Splice_Region',
'Silent', 'RNA', "5'UTR", "3'UTR", 'IGR',
"5'Flank", "3'Flank", None]
warn, error = process_functions.check_col_and_values(
assay_info_df,
'variant_classifications',
variant_classes,
filename="Assay_information.yaml",
na_allowed=True,
sep=";")
warning += warn
total_error += error
if process_functions.checkColExist(assay_info_df, "read_length"):
if not all([process_functions.checkInt(i)
for i in assay_info_df["read_length"]
if i is not None and not | pd.isnull(i) | pandas.isnull |
import pandas as pd
import xlsxwriter
array = [['a1', 'a2', 'a3'],
['a4', 'a5', 'a6'],
['a7', 'a8', 'a9'],
['a10', 'a11', 'a12', 'a13', 'a14']]
months = ['jan', 'feb', 'mar', 'apr', 'may']
df = | pd.DataFrame(array, columns=months) | pandas.DataFrame |
import mne
import pandas as pd
from my_settings import *
reject = dict(grad=4000e-13, # T / m (gradiometers)
mag=4e-12, # T (magnetometers)
eeg=180e-6 #
)
result = | pd.DataFrame() | pandas.DataFrame |
import json
import pandas as pd
import sys
from tl.file_formats_validator import FFV
from tl.exceptions import UnsupportTypeError
from concurrent.futures import ThreadPoolExecutor
from itertools import repeat
class Utility(object):
def __init__(self, es, output_column_name: str = 'retrieval_score',
previous_match_column_name: str = 'retrieval_score'):
self.es = es
self.previous_match_column_name = previous_match_column_name
self.ffv = FFV(previous_match_column_name)
self.score_column_name = output_column_name
def create_candidates_df(self, df, column, size, properties, method,
lower_case=False, auxiliary_fields=None,
auxiliary_folder=None, auxiliary_file_prefix='',
extra_musts=None, max_threads=50, identifier_property=None):
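        # `properties` arrives as a comma-separated string; normalise it into a list.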
properties = [_.strip() for _ in properties.split(',')]
candidates_format = list()
df_columns = df.columns
all_candidates_aux_dict = {}
max_threads = min(df.shape[0], max_threads)
if self.ffv.is_canonical_file(df):
rows = df.to_dict("records")
with ThreadPoolExecutor(max_workers=max_threads) as executor:
for _candidates_format, candidates_aux_dict in executor.map(
self.create_candidates, rows, repeat(df_columns),
repeat(column), repeat(size), repeat(properties),
repeat(method), repeat(lower_case),
repeat(auxiliary_fields), repeat(extra_musts), repeat(identifier_property)):
all_candidates_aux_dict = {**all_candidates_aux_dict,
**candidates_aux_dict}
candidates_format.extend(_candidates_format)
self.write_auxiliary_files(auxiliary_folder,
all_candidates_aux_dict,
auxiliary_fields,
prefix=auxiliary_file_prefix)
return pd.DataFrame(candidates_format)
elif self.ffv.is_candidates_file(df):
grouped = df.groupby(by=['column', 'row', column])
relevant_columns = [c for c in df_columns if
c not in ['kg_id', 'kg_labels', 'method',
'kg_descriptions',
self.previous_match_column_name]]
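            # Collapse each (column, row, cell value) group back to one canonical
            # row, keeping only the non-candidate-specific columns listed above.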
rows = list()
for key_tuple, gdf in grouped:
gdf.reset_index(inplace=True)
rows.append({c: gdf.at[0, c] for c in relevant_columns})
with ThreadPoolExecutor(max_workers=max_threads) as executor:
for _candidates_format, candidates_aux_dict in executor.map(
self.create_candidates, rows,
repeat(relevant_columns), repeat(column),
repeat(size), repeat(properties), repeat(method),
repeat(lower_case), repeat(auxiliary_fields),
repeat(extra_musts)):
all_candidates_aux_dict = {**all_candidates_aux_dict,
**candidates_aux_dict}
candidates_format.extend(_candidates_format)
self.write_auxiliary_files(auxiliary_folder,
all_candidates_aux_dict,
auxiliary_fields,
prefix=auxiliary_file_prefix)
return pd.concat([df, | pd.DataFrame(candidates_format) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101
import itertools
import warnings
from warnings import catch_warnings
from datetime import datetime
from pandas.types.common import (is_integer_dtype,
is_float_dtype,
is_scalar)
from pandas.compat import range, lrange, lzip, StringIO, lmap
from pandas.tslib import NaT
from numpy import nan
from numpy.random import randn
import numpy as np
import pandas as pd
from pandas import option_context
from pandas.core.indexing import _non_reducing_slice, _maybe_numeric_slice
from pandas.core.api import (DataFrame, Index, Series, Panel, isnull,
MultiIndex, Timestamp, Timedelta, UInt64Index)
from pandas.formats.printing import pprint_thing
from pandas import concat
from pandas.core.common import PerformanceWarning
from pandas.tests.indexing.common import _mklbl
import pandas.util.testing as tm
from pandas import date_range
_verbose = False
# ------------------------------------------------------------------------
# Indexing test cases
def _generate_indices(f, values=False):
""" generate the indicies
if values is True , use the axis values
is False, use the range
"""
axes = f.axes
if values:
axes = [lrange(len(a)) for a in axes]
return itertools.product(*axes)
def _get_value(f, i, values=False):
""" return the value for the location i """
    # check against values
if values:
return f.values[i]
# this is equiv of f[col][row].....
# v = f
# for a in reversed(i):
# v = v.__getitem__(a)
# return v
with catch_warnings(record=True):
return f.ix[i]
def _get_result(obj, method, key, axis):
""" return the result for this obj with this key and this axis """
if isinstance(key, dict):
key = key[axis]
    # use an artificial conversion to map the key as integers to the labels
    # so ix can work for comparisons
if method == 'indexer':
method = 'ix'
key = obj._get_axis(axis)[key]
# in case we actually want 0 index slicing
try:
xp = getattr(obj, method).__getitem__(_axify(obj, key, axis))
except:
xp = getattr(obj, method).__getitem__(key)
return xp
def _axify(obj, key, axis):
# create a tuple accessor
axes = [slice(None)] * obj.ndim
axes[axis] = key
return tuple(axes)
class TestIndexing(tm.TestCase):
_objs = set(['series', 'frame', 'panel'])
_typs = set(['ints', 'uints', 'labels', 'mixed',
'ts', 'floats', 'empty', 'ts_rev'])
def setUp(self):
self.series_ints = Series(np.random.rand(4), index=lrange(0, 8, 2))
self.frame_ints = DataFrame(np.random.randn(4, 4),
index=lrange(0, 8, 2),
columns=lrange(0, 12, 3))
self.panel_ints = Panel(np.random.rand(4, 4, 4),
items=lrange(0, 8, 2),
major_axis=lrange(0, 12, 3),
minor_axis=lrange(0, 16, 4))
self.series_uints = Series(np.random.rand(4),
index=UInt64Index(lrange(0, 8, 2)))
self.frame_uints = DataFrame(np.random.randn(4, 4),
index=UInt64Index(lrange(0, 8, 2)),
columns=UInt64Index(lrange(0, 12, 3)))
self.panel_uints = Panel(np.random.rand(4, 4, 4),
items=UInt64Index(lrange(0, 8, 2)),
major_axis=UInt64Index(lrange(0, 12, 3)),
minor_axis=UInt64Index(lrange(0, 16, 4)))
self.series_labels = Series(np.random.randn(4), index=list('abcd'))
self.frame_labels = DataFrame(np.random.randn(4, 4),
index=list('abcd'), columns=list('ABCD'))
self.panel_labels = Panel(np.random.randn(4, 4, 4),
items=list('abcd'),
major_axis=list('ABCD'),
minor_axis=list('ZYXW'))
self.series_mixed = Series(np.random.randn(4), index=[2, 4, 'null', 8])
self.frame_mixed = DataFrame(np.random.randn(4, 4),
index=[2, 4, 'null', 8])
self.panel_mixed = Panel(np.random.randn(4, 4, 4),
items=[2, 4, 'null', 8])
self.series_ts = Series(np.random.randn(4),
index=date_range('20130101', periods=4))
self.frame_ts = DataFrame(np.random.randn(4, 4),
index=date_range('20130101', periods=4))
self.panel_ts = Panel(np.random.randn(4, 4, 4),
items=date_range('20130101', periods=4))
dates_rev = (date_range('20130101', periods=4)
.sort_values(ascending=False))
self.series_ts_rev = Series(np.random.randn(4),
index=dates_rev)
self.frame_ts_rev = DataFrame(np.random.randn(4, 4),
index=dates_rev)
self.panel_ts_rev = Panel(np.random.randn(4, 4, 4),
items=dates_rev)
self.frame_empty = DataFrame({})
self.series_empty = Series({})
self.panel_empty = Panel({})
# form agglomerates
for o in self._objs:
d = dict()
for t in self._typs:
d[t] = getattr(self, '%s_%s' % (o, t), None)
setattr(self, o, d)
def check_values(self, f, func, values=False):
if f is None:
return
axes = f.axes
        indices = itertools.product(*axes)
        for i in indices:
            result = getattr(f, func)[i]
            # check against values
if values:
expected = f.values[i]
else:
expected = f
for a in reversed(i):
expected = expected.__getitem__(a)
tm.assert_almost_equal(result, expected)
def check_result(self, name, method1, key1, method2, key2, typs=None,
objs=None, axes=None, fails=None):
def _eq(t, o, a, obj, k1, k2):
""" compare equal for these 2 keys """
if a is not None and a > obj.ndim - 1:
return
def _print(result, error=None):
if error is not None:
error = str(error)
v = ("%-16.16s [%-16.16s]: [typ->%-8.8s,obj->%-8.8s,"
"key1->(%-4.4s),key2->(%-4.4s),axis->%s] %s" %
(name, result, t, o, method1, method2, a, error or ''))
if _verbose:
pprint_thing(v)
try:
rs = getattr(obj, method1).__getitem__(_axify(obj, k1, a))
try:
xp = _get_result(obj, method2, k2, a)
except:
result = 'no comp'
_print(result)
return
detail = None
try:
if is_scalar(rs) and is_scalar(xp):
self.assertEqual(rs, xp)
elif xp.ndim == 1:
tm.assert_series_equal(rs, xp)
elif xp.ndim == 2:
tm.assert_frame_equal(rs, xp)
elif xp.ndim == 3:
tm.assert_panel_equal(rs, xp)
result = 'ok'
except AssertionError as e:
detail = str(e)
result = 'fail'
# reverse the checks
if fails is True:
if result == 'fail':
result = 'ok (fail)'
_print(result)
if not result.startswith('ok'):
raise AssertionError(detail)
except AssertionError:
raise
except Exception as detail:
# if we are in fails, the ok, otherwise raise it
if fails is not None:
if isinstance(detail, fails):
result = 'ok (%s)' % type(detail).__name__
_print(result)
return
result = type(detail).__name__
raise AssertionError(_print(result, error=detail))
if typs is None:
typs = self._typs
if objs is None:
objs = self._objs
if axes is not None:
if not isinstance(axes, (tuple, list)):
axes = [axes]
else:
axes = list(axes)
else:
axes = [0, 1, 2]
# check
for o in objs:
if o not in self._objs:
continue
d = getattr(self, o)
for a in axes:
for t in typs:
if t not in self._typs:
continue
obj = d[t]
if obj is not None:
obj = obj.copy()
k2 = key2
_eq(t, o, a, obj, key1, k2)
def test_ix_deprecation(self):
# GH 15114
df = DataFrame({'A': [1, 2, 3]})
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
df.ix[1, 'A']
def test_indexer_caching(self):
# GH5727
# make sure that indexers are in the _internal_names_set
n = 1000001
arrays = [lrange(n), lrange(n)]
index = MultiIndex.from_tuples(lzip(*arrays))
s = Series(np.zeros(n), index=index)
str(s)
# setitem
expected = Series(np.ones(n), index=index)
s = Series(np.zeros(n), index=index)
s[s == 0] = 1
tm.assert_series_equal(s, expected)
def test_at_and_iat_get(self):
def _check(f, func, values=False):
if f is not None:
                indices = _generate_indices(f, values)
                for i in indices:
result = getattr(f, func)[i]
expected = _get_value(f, i, values)
tm.assert_almost_equal(result, expected)
for o in self._objs:
d = getattr(self, o)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, self.check_values, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_and_iat_set(self):
def _check(f, func, values=False):
if f is not None:
                indices = _generate_indices(f, values)
                for i in indices:
getattr(f, func)[i] = 1
expected = _get_value(f, i, values)
tm.assert_almost_equal(expected, 1)
for t in self._objs:
d = getattr(self, t)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, _check, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_iat_coercion(self):
# as timestamp is not a tuple!
dates = date_range('1/1/2000', periods=8)
df = DataFrame(randn(8, 4), index=dates, columns=['A', 'B', 'C', 'D'])
s = df['A']
result = s.at[dates[5]]
xp = s.values[5]
self.assertEqual(result, xp)
# GH 7729
# make sure we are boxing the returns
s = Series(['2014-01-01', '2014-02-02'], dtype='datetime64[ns]')
expected = Timestamp('2014-02-02')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
s = Series(['1 days', '2 days'], dtype='timedelta64[ns]')
expected = Timedelta('2 days')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
def test_iat_invalid_args(self):
pass
def test_imethods_with_dups(self):
# GH6493
# iat/iloc with dups
s = Series(range(5), index=[1, 1, 2, 2, 3], dtype='int64')
result = s.iloc[2]
self.assertEqual(result, 2)
result = s.iat[2]
self.assertEqual(result, 2)
self.assertRaises(IndexError, lambda: s.iat[10])
self.assertRaises(IndexError, lambda: s.iat[-10])
result = s.iloc[[2, 3]]
expected = Series([2, 3], [2, 2], dtype='int64')
tm.assert_series_equal(result, expected)
df = s.to_frame()
result = df.iloc[2]
expected = Series(2, index=[0], name=2)
tm.assert_series_equal(result, expected)
result = df.iat[2, 0]
expected = 2
self.assertEqual(result, 2)
def test_repeated_getitem_dups(self):
# GH 5678
        # repeated getitems on a dup index returning an ndarray
df = DataFrame(
np.random.random_sample((20, 5)),
index=['ABCDE' [x % 5] for x in range(20)])
expected = df.loc['A', 0]
result = df.loc[:, 0].loc['A']
tm.assert_series_equal(result, expected)
def test_iloc_exceeds_bounds(self):
# GH6296
# iloc should allow indexers that exceed the bounds
df = DataFrame(np.random.random_sample((20, 5)), columns=list('ABCDE'))
expected = df
        # lists of positions should raise IndexError!
with tm.assertRaisesRegexp(IndexError,
'positional indexers are out-of-bounds'):
df.iloc[:, [0, 1, 2, 3, 4, 5]]
self.assertRaises(IndexError, lambda: df.iloc[[1, 30]])
self.assertRaises(IndexError, lambda: df.iloc[[1, -30]])
self.assertRaises(IndexError, lambda: df.iloc[[100]])
s = df['A']
self.assertRaises(IndexError, lambda: s.iloc[[100]])
self.assertRaises(IndexError, lambda: s.iloc[[-100]])
# still raise on a single indexer
msg = 'single positional indexer is out-of-bounds'
with tm.assertRaisesRegexp(IndexError, msg):
df.iloc[30]
self.assertRaises(IndexError, lambda: df.iloc[-30])
# GH10779
# single positive/negative indexer exceeding Series bounds should raise
# an IndexError
with tm.assertRaisesRegexp(IndexError, msg):
s.iloc[30]
self.assertRaises(IndexError, lambda: s.iloc[-30])
# slices are ok
result = df.iloc[:, 4:10] # 0 < start < len < stop
expected = df.iloc[:, 4:]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -4:-10] # stop < 0 < start < len
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4:-1] # 0 < stop < len < start (down)
expected = df.iloc[:, :4:-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 4:-10:-1] # stop < 0 < start < len (down)
expected = df.iloc[:, 4::-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:4] # start < 0 < stop < len
expected = df.iloc[:, :4]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4] # 0 < stop < len < start
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:-11:-1] # stop < start < 0 < len (down)
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:11] # 0 < len < start < stop
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
# slice bounds exceeding is ok
result = s.iloc[18:30]
expected = s.iloc[18:]
tm.assert_series_equal(result, expected)
result = s.iloc[30:]
expected = s.iloc[:0]
tm.assert_series_equal(result, expected)
result = s.iloc[30::-1]
expected = s.iloc[::-1]
tm.assert_series_equal(result, expected)
# doc example
def check(result, expected):
str(result)
result.dtypes
tm.assert_frame_equal(result, expected)
dfl = DataFrame(np.random.randn(5, 2), columns=list('AB'))
check(dfl.iloc[:, 2:3], DataFrame(index=dfl.index))
check(dfl.iloc[:, 1:3], dfl.iloc[:, [1]])
check(dfl.iloc[4:6], dfl.iloc[[4]])
self.assertRaises(IndexError, lambda: dfl.iloc[[4, 5, 6]])
self.assertRaises(IndexError, lambda: dfl.iloc[:, 4])
def test_iloc_getitem_int(self):
# integer
self.check_result('integer', 'iloc', 2, 'ix',
{0: 4, 1: 6, 2: 8}, typs=['ints', 'uints'])
self.check_result('integer', 'iloc', 2, 'indexer', 2,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int(self):
# neg integer
self.check_result('neg int', 'iloc', -1, 'ix',
{0: 6, 1: 9, 2: 12}, typs=['ints', 'uints'])
self.check_result('neg int', 'iloc', -1, 'indexer', -1,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_list_int(self):
# list of ints
self.check_result('list int', 'iloc', [0, 1, 2], 'ix',
{0: [0, 2, 4], 1: [0, 3, 6], 2: [0, 4, 8]},
typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [2], 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [0, 1, 2], 'indexer', [0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
# array of ints (GH5006), make sure that a single indexer is returning
# the correct type
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'ix',
{0: [0, 2, 4],
1: [0, 3, 6],
2: [0, 4, 8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([2]), 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'indexer',
[0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int_can_reach_first_index(self):
# GH10547 and GH10779
# negative integers should be able to reach index 0
df = DataFrame({'A': [2, 3, 5], 'B': [7, 11, 13]})
s = df['A']
expected = df.iloc[0]
result = df.iloc[-3]
tm.assert_series_equal(result, expected)
expected = df.iloc[[0]]
result = df.iloc[[-3]]
tm.assert_frame_equal(result, expected)
expected = s.iloc[0]
result = s.iloc[-3]
self.assertEqual(result, expected)
expected = s.iloc[[0]]
result = s.iloc[[-3]]
tm.assert_series_equal(result, expected)
# check the length 1 Series case highlighted in GH10547
expected = pd.Series(['a'], index=['A'])
result = expected.iloc[[-1]]
tm.assert_series_equal(result, expected)
def test_iloc_getitem_dups(self):
# no dups in panel (bug?)
self.check_result('list int (dups)', 'iloc', [0, 1, 1, 3], 'ix',
{0: [0, 2, 2, 6], 1: [0, 3, 3, 9]},
objs=['series', 'frame'], typs=['ints', 'uints'])
# GH 6766
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
df = concat([df1, df2], axis=1)
# cross-sectional indexing
result = df.iloc[0, 0]
self.assertTrue(isnull(result))
result = df.iloc[0, :]
expected = Series([np.nan, 1, 3, 3], index=['A', 'B', 'A', 'B'],
name=0)
tm.assert_series_equal(result, expected)
def test_iloc_getitem_array(self):
# array like
s = Series(index=lrange(1, 4))
self.check_result('array like', 'iloc', s.index, 'ix',
{0: [2, 4, 6], 1: [3, 6, 9], 2: [4, 8, 12]},
typs=['ints', 'uints'])
def test_iloc_getitem_bool(self):
# boolean indexers
b = [True, False, True, False, ]
self.check_result('bool', 'iloc', b, 'ix', b, typs=['ints', 'uints'])
self.check_result('bool', 'iloc', b, 'ix', b,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice(self):
# slices
self.check_result('slice', 'iloc', slice(1, 3), 'ix',
{0: [2, 4], 1: [3, 6], 2: [4, 8]},
typs=['ints', 'uints'])
self.check_result('slice', 'iloc', slice(1, 3), 'indexer',
slice(1, 3),
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice_dups(self):
df1 = DataFrame(np.random.randn(10, 4), columns=['A', 'A', 'B', 'B'])
df2 = DataFrame(np.random.randint(0, 10, size=20).reshape(10, 2),
columns=['A', 'C'])
# axis=1
df = concat([df1, df2], axis=1)
tm.assert_frame_equal(df.iloc[:, :4], df1)
tm.assert_frame_equal(df.iloc[:, 4:], df2)
df = concat([df2, df1], axis=1)
tm.assert_frame_equal(df.iloc[:, :2], df2)
tm.assert_frame_equal(df.iloc[:, 2:], df1)
exp = concat([df2, df1.iloc[:, [0]]], axis=1)
tm.assert_frame_equal(df.iloc[:, 0:3], exp)
# axis=0
df = concat([df, df], axis=0)
tm.assert_frame_equal(df.iloc[0:10, :2], df2)
tm.assert_frame_equal(df.iloc[0:10, 2:], df1)
tm.assert_frame_equal(df.iloc[10:, :2], df2)
tm.assert_frame_equal(df.iloc[10:, 2:], df1)
def test_iloc_setitem(self):
df = self.frame_ints
df.iloc[1, 1] = 1
result = df.iloc[1, 1]
self.assertEqual(result, 1)
df.iloc[:, 2:3] = 0
expected = df.iloc[:, 2:3]
result = df.iloc[:, 2:3]
tm.assert_frame_equal(result, expected)
# GH5771
s = Series(0, index=[4, 5, 6])
s.iloc[1:2] += 1
expected = Series([0, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
def test_loc_setitem_slice(self):
# GH10503
# assigning the same type should not change the type
df1 = DataFrame({'a': [0, 1, 1],
'b': Series([100, 200, 300], dtype='uint32')})
ix = df1['a'] == 1
newb1 = df1.loc[ix, 'b'] + 1
df1.loc[ix, 'b'] = newb1
expected = DataFrame({'a': [0, 1, 1],
'b': Series([100, 201, 301], dtype='uint32')})
tm.assert_frame_equal(df1, expected)
# assigning a new type should get the inferred type
df2 = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
dtype='uint64')
ix = df1['a'] == 1
newb2 = df2.loc[ix, 'b']
df1.loc[ix, 'b'] = newb2
expected = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
dtype='uint64')
tm.assert_frame_equal(df2, expected)
def test_ix_loc_setitem_consistency(self):
# GH 5771
# loc with slice and series
s = Series(0, index=[4, 5, 6])
s.loc[4:5] += 1
expected = Series([1, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
# GH 5928
# chained indexing assignment
df = DataFrame({'a': [0, 1, 2]})
expected = df.copy()
with catch_warnings(record=True):
expected.ix[[0, 1, 2], 'a'] = -expected.ix[[0, 1, 2], 'a']
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]]
tm.assert_frame_equal(df, expected)
df = DataFrame({'a': [0, 1, 2], 'b': [0, 1, 2]})
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]].astype(
'float64') + 0.5
expected = DataFrame({'a': [0.5, -0.5, -1.5], 'b': [0, 1, 2]})
tm.assert_frame_equal(df, expected)
# GH 8607
# ix setitem consistency
df = DataFrame({'timestamp': [1413840976, 1413842580, 1413760580],
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
expected = DataFrame({'timestamp': pd.to_datetime(
[1413840976, 1413842580, 1413760580], unit='s'),
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
df2 = df.copy()
df2['timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
df2.loc[:, 'timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
with catch_warnings(record=True):
df2.ix[:, 2] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
def test_ix_loc_consistency(self):
# GH 8613
# some edge cases where ix/loc should return the same
# this is not an exhaustive case
def compare(result, expected):
if is_scalar(expected):
self.assertEqual(result, expected)
else:
self.assertTrue(expected.equals(result))
# failure cases for .loc, but these work for .ix
df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'))
for key in [slice(1, 3), tuple([slice(0, 2), slice(0, 2)]),
tuple([slice(0, 2), df.columns[0:2]])]:
for index in [tm.makeStringIndex, tm.makeUnicodeIndex,
tm.makeDateIndex, tm.makePeriodIndex,
tm.makeTimedeltaIndex]:
df.index = index(len(df.index))
with catch_warnings(record=True):
df.ix[key]
self.assertRaises(TypeError, lambda: df.loc[key])
df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'),
index=pd.date_range('2012-01-01', periods=5))
for key in ['2012-01-03',
'2012-01-31',
slice('2012-01-03', '2012-01-03'),
slice('2012-01-03', '2012-01-04'),
slice('2012-01-03', '2012-01-06', 2),
slice('2012-01-03', '2012-01-31'),
tuple([[True, True, True, False, True]]), ]:
# getitem
# if the expected raises, then compare the exceptions
try:
with catch_warnings(record=True):
expected = df.ix[key]
except KeyError:
self.assertRaises(KeyError, lambda: df.loc[key])
continue
result = df.loc[key]
compare(result, expected)
# setitem
df1 = df.copy()
df2 = df.copy()
with catch_warnings(record=True):
df1.ix[key] = 10
df2.loc[key] = 10
compare(df2, df1)
# edge cases
s = Series([1, 2, 3, 4], index=list('abde'))
result1 = s['a':'c']
with catch_warnings(record=True):
result2 = s.ix['a':'c']
result3 = s.loc['a':'c']
tm.assert_series_equal(result1, result2)
tm.assert_series_equal(result1, result3)
# now work rather than raising KeyError
s = Series(range(5), [-2, -1, 1, 2, 3])
with catch_warnings(record=True):
result1 = s.ix[-10:3]
result2 = s.loc[-10:3]
tm.assert_series_equal(result1, result2)
with catch_warnings(record=True):
result1 = s.ix[0:3]
result2 = s.loc[0:3]
tm.assert_series_equal(result1, result2)
def test_loc_setitem_dups(self):
# GH 6541
df_orig = DataFrame(
{'me': list('rttti'),
'foo': list('aaade'),
'bar': np.arange(5, dtype='float64') * 1.34 + 2,
'bar2': np.arange(5, dtype='float64') * -.34 + 2}).set_index('me')
indexer = tuple(['r', ['bar', 'bar2']])
df = df_orig.copy()
df.loc[indexer] *= 2.0
tm.assert_series_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
indexer = tuple(['r', 'bar'])
df = df_orig.copy()
df.loc[indexer] *= 2.0
self.assertEqual(df.loc[indexer], 2.0 * df_orig.loc[indexer])
indexer = tuple(['t', ['bar', 'bar2']])
df = df_orig.copy()
df.loc[indexer] *= 2.0
tm.assert_frame_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
def test_iloc_setitem_dups(self):
# GH 6766
# iloc with a mask aligning from another iloc
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
df = concat([df1, df2], axis=1)
expected = df.fillna(3)
expected['A'] = expected['A'].astype('float64')
inds = np.isnan(df.iloc[:, 0])
mask = inds[inds].index
df.iloc[mask, 0] = df.iloc[mask, 2]
tm.assert_frame_equal(df, expected)
# del a dup column across blocks
expected = DataFrame({0: [1, 2], 1: [3, 4]})
expected.columns = ['B', 'B']
del df['A']
tm.assert_frame_equal(df, expected)
# assign back to self
df.iloc[[0, 1], [0, 1]] = df.iloc[[0, 1], [0, 1]]
tm.assert_frame_equal(df, expected)
# reversed x 2
df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
drop=True)
df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
drop=True)
tm.assert_frame_equal(df, expected)
def test_chained_getitem_with_lists(self):
# GH6394
# Regression in chained getitem indexing with embedded list-like from
# 0.12
def check(result, expected):
tm.assert_numpy_array_equal(result, expected)
tm.assertIsInstance(result, np.ndarray)
df = DataFrame({'A': 5 * [np.zeros(3)], 'B': 5 * [np.ones(3)]})
expected = df['A'].iloc[2]
result = df.loc[2, 'A']
check(result, expected)
result2 = df.iloc[2]['A']
check(result2, expected)
result3 = df['A'].loc[2]
check(result3, expected)
result4 = df['A'].iloc[2]
check(result4, expected)
def test_loc_getitem_int(self):
# int label
self.check_result('int label', 'loc', 2, 'ix', 2,
typs=['ints', 'uints'], axes=0)
self.check_result('int label', 'loc', 3, 'ix', 3,
typs=['ints', 'uints'], axes=1)
self.check_result('int label', 'loc', 4, 'ix', 4,
typs=['ints', 'uints'], axes=2)
self.check_result('int label', 'loc', 2, 'ix', 2,
typs=['label'], fails=KeyError)
def test_loc_getitem_label(self):
# label
self.check_result('label', 'loc', 'c', 'ix', 'c', typs=['labels'],
axes=0)
self.check_result('label', 'loc', 'null', 'ix', 'null', typs=['mixed'],
axes=0)
self.check_result('label', 'loc', 8, 'ix', 8, typs=['mixed'], axes=0)
self.check_result('label', 'loc', Timestamp('20130102'), 'ix', 1,
typs=['ts'], axes=0)
self.check_result('label', 'loc', 'c', 'ix', 'c', typs=['empty'],
fails=KeyError)
def test_loc_getitem_label_out_of_range(self):
# out of range label
self.check_result('label range', 'loc', 'f', 'ix', 'f',
typs=['ints', 'uints', 'labels', 'mixed', 'ts'],
fails=KeyError)
self.check_result('label range', 'loc', 'f', 'ix', 'f',
typs=['floats'], fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20,
typs=['ints', 'uints', 'mixed'], fails=KeyError)
self.check_result('label range', 'loc', 20, 'ix', 20,
typs=['labels'], fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20, typs=['ts'],
axes=0, fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20, typs=['floats'],
axes=0, fails=TypeError)
def test_loc_getitem_label_list(self):
# list of labels
self.check_result('list lbl', 'loc', [0, 2, 4], 'ix', [0, 2, 4],
typs=['ints', 'uints'], axes=0)
self.check_result('list lbl', 'loc', [3, 6, 9], 'ix', [3, 6, 9],
typs=['ints', 'uints'], axes=1)
self.check_result('list lbl', 'loc', [4, 8, 12], 'ix', [4, 8, 12],
typs=['ints', 'uints'], axes=2)
self.check_result('list lbl', 'loc', ['a', 'b', 'd'], 'ix',
['a', 'b', 'd'], typs=['labels'], axes=0)
self.check_result('list lbl', 'loc', ['A', 'B', 'C'], 'ix',
['A', 'B', 'C'], typs=['labels'], axes=1)
self.check_result('list lbl', 'loc', ['Z', 'Y', 'W'], 'ix',
['Z', 'Y', 'W'], typs=['labels'], axes=2)
self.check_result('list lbl', 'loc', [2, 8, 'null'], 'ix',
[2, 8, 'null'], typs=['mixed'], axes=0)
self.check_result('list lbl', 'loc',
[Timestamp('20130102'), Timestamp('20130103')], 'ix',
[Timestamp('20130102'), Timestamp('20130103')],
typs=['ts'], axes=0)
self.check_result('list lbl', 'loc', [0, 1, 2], 'indexer', [0, 1, 2],
typs=['empty'], fails=KeyError)
self.check_result('list lbl', 'loc', [0, 2, 3], 'ix', [0, 2, 3],
typs=['ints', 'uints'], axes=0, fails=KeyError)
self.check_result('list lbl', 'loc', [3, 6, 7], 'ix', [3, 6, 7],
typs=['ints', 'uints'], axes=1, fails=KeyError)
self.check_result('list lbl', 'loc', [4, 8, 10], 'ix', [4, 8, 10],
typs=['ints', 'uints'], axes=2, fails=KeyError)
def test_loc_getitem_label_list_fails(self):
# fails
self.check_result('list lbl', 'loc', [20, 30, 40], 'ix', [20, 30, 40],
typs=['ints', 'uints'], axes=1, fails=KeyError)
self.check_result('list lbl', 'loc', [20, 30, 40], 'ix', [20, 30, 40],
typs=['ints', 'uints'], axes=2, fails=KeyError)
def test_loc_getitem_label_array_like(self):
# array like
self.check_result('array like', 'loc', Series(index=[0, 2, 4]).index,
'ix', [0, 2, 4], typs=['ints', 'uints'], axes=0)
self.check_result('array like', 'loc', Series(index=[3, 6, 9]).index,
'ix', [3, 6, 9], typs=['ints', 'uints'], axes=1)
self.check_result('array like', 'loc', Series(index=[4, 8, 12]).index,
'ix', [4, 8, 12], typs=['ints', 'uints'], axes=2)
def test_loc_getitem_bool(self):
# boolean indexers
b = [True, False, True, False]
self.check_result('bool', 'loc', b, 'ix', b,
typs=['ints', 'uints', 'labels',
'mixed', 'ts', 'floats'])
self.check_result('bool', 'loc', b, 'ix', b, typs=['empty'],
fails=KeyError)
def test_loc_getitem_int_slice(self):
# ok
self.check_result('int slice2', 'loc', slice(2, 4), 'ix', [2, 4],
typs=['ints', 'uints'], axes=0)
self.check_result('int slice2', 'loc', slice(3, 6), 'ix', [3, 6],
typs=['ints', 'uints'], axes=1)
self.check_result('int slice2', 'loc', slice(4, 8), 'ix', [4, 8],
typs=['ints', 'uints'], axes=2)
# GH 3053
# loc should treat integer slices like label slices
from itertools import product
index = MultiIndex.from_tuples([t for t in product(
[6, 7, 8], ['a', 'b'])])
df = DataFrame(np.random.randn(6, 6), index, index)
result = df.loc[6:8, :]
with catch_warnings(record=True):
expected = df.ix[6:8, :]
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_tuples([t
for t in product(
[10, 20, 30], ['a', 'b'])])
df = DataFrame(np.random.randn(6, 6), index, index)
result = df.loc[20:30, :]
with catch_warnings(record=True):
expected = df.ix[20:30, :]
tm.assert_frame_equal(result, expected)
# doc examples
result = df.loc[10, :]
with catch_warnings(record=True):
expected = df.ix[10, :]
tm.assert_frame_equal(result, expected)
result = df.loc[:, 10]
# expected = df.ix[:,10] (this fails)
expected = df[10]
tm.assert_frame_equal(result, expected)
def test_loc_to_fail(self):
# GH3449
df = DataFrame(np.random.random((3, 3)),
index=['a', 'b', 'c'],
columns=['e', 'f', 'g'])
# raise a KeyError?
self.assertRaises(KeyError, df.loc.__getitem__,
tuple([[1, 2], [1, 2]]))
# GH 7496
        # loc should not fall back
s = Series()
s.loc[1] = 1
s.loc['a'] = 2
self.assertRaises(KeyError, lambda: s.loc[-1])
self.assertRaises(KeyError, lambda: s.loc[[-1, -2]])
self.assertRaises(KeyError, lambda: s.loc[['4']])
s.loc[-1] = 3
result = s.loc[[-1, -2]]
expected = Series([3, np.nan], index=[-1, -2])
tm.assert_series_equal(result, expected)
s['a'] = 2
self.assertRaises(KeyError, lambda: s.loc[[-2]])
del s['a']
def f():
s.loc[[-2]] = 0
self.assertRaises(KeyError, f)
# inconsistency between .loc[values] and .loc[values,:]
# GH 7999
df = DataFrame([['a'], ['b']], index=[1, 2], columns=['value'])
def f():
df.loc[[3], :]
self.assertRaises(KeyError, f)
def f():
df.loc[[3]]
self.assertRaises(KeyError, f)
def test_at_to_fail(self):
# at should not fallback
# GH 7814
s = Series([1, 2, 3], index=list('abc'))
result = s.at['a']
self.assertEqual(result, 1)
self.assertRaises(ValueError, lambda: s.at[0])
df = DataFrame({'A': [1, 2, 3]}, index=list('abc'))
result = df.at['a', 'A']
self.assertEqual(result, 1)
self.assertRaises(ValueError, lambda: df.at['a', 0])
s = Series([1, 2, 3], index=[3, 2, 1])
result = s.at[1]
self.assertEqual(result, 3)
self.assertRaises(ValueError, lambda: s.at['a'])
df = DataFrame({0: [1, 2, 3]}, index=[3, 2, 1])
result = df.at[1, 0]
self.assertEqual(result, 3)
self.assertRaises(ValueError, lambda: df.at['a', 0])
# GH 13822, incorrect error string with non-unique columns when missing
# column is accessed
df = DataFrame({'x': [1.], 'y': [2.], 'z': [3.]})
df.columns = ['x', 'x', 'z']
# Check that we get the correct value in the KeyError
self.assertRaisesRegexp(KeyError, r"\['y'\] not in index",
lambda: df[['x', 'y', 'z']])
def test_loc_getitem_label_slice(self):
# label slices (with ints)
self.check_result('lab slice', 'loc', slice(1, 3),
'ix', slice(1, 3),
typs=['labels', 'mixed', 'empty', 'ts', 'floats'],
fails=TypeError)
# real label slices
self.check_result('lab slice', 'loc', slice('a', 'c'),
'ix', slice('a', 'c'), typs=['labels'], axes=0)
self.check_result('lab slice', 'loc', slice('A', 'C'),
'ix', slice('A', 'C'), typs=['labels'], axes=1)
self.check_result('lab slice', 'loc', slice('W', 'Z'),
'ix', slice('W', 'Z'), typs=['labels'], axes=2)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=0)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=1, fails=TypeError)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=2, fails=TypeError)
# GH 14316
self.check_result('ts slice rev', 'loc', slice('20130104', '20130102'),
'indexer', [0, 1, 2], typs=['ts_rev'], axes=0)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=0, fails=TypeError)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=1, fails=KeyError)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=2, fails=KeyError)
self.check_result('mixed slice', 'loc', slice(2, 4, 2), 'ix', slice(
2, 4, 2), typs=['mixed'], axes=0, fails=TypeError)
def test_loc_general(self):
df = DataFrame(
np.random.rand(4, 4), columns=['A', 'B', 'C', 'D'],
index=['A', 'B', 'C', 'D'])
# want this to work
result = df.loc[:, "A":"B"].iloc[0:2, :]
self.assertTrue((result.columns == ['A', 'B']).all())
self.assertTrue((result.index == ['A', 'B']).all())
# mixed type
result = DataFrame({'a': [Timestamp('20130101')], 'b': [1]}).iloc[0]
expected = Series([Timestamp('20130101'), 1], index=['a', 'b'], name=0)
tm.assert_series_equal(result, expected)
self.assertEqual(result.dtype, object)
def test_loc_setitem_consistency(self):
# GH 6149
        # coerce similarly for setitem and loc when rows have a null-slice
expected = DataFrame({'date': Series(0, index=range(5),
dtype=np.int64),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(
range(5), dtype=np.int64)})
df.loc[:, 'date'] = 0
tm.assert_frame_equal(df, expected)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = np.array(0, dtype=np.int64)
tm.assert_frame_equal(df, expected)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = np.array([0, 0, 0, 0, 0], dtype=np.int64)
tm.assert_frame_equal(df, expected)
expected = DataFrame({'date': Series('foo', index=range(5)),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = 'foo'
tm.assert_frame_equal(df, expected)
expected = DataFrame({'date': Series(1.0, index=range(5)),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = 1.0
tm.assert_frame_equal(df, expected)
def test_loc_setitem_consistency_empty(self):
# empty (essentially noops)
expected = DataFrame(columns=['x', 'y'])
expected['x'] = expected['x'].astype(np.int64)
df = DataFrame(columns=['x', 'y'])
df.loc[:, 'x'] = 1
tm.assert_frame_equal(df, expected)
df = DataFrame(columns=['x', 'y'])
df['x'] = 1
tm.assert_frame_equal(df, expected)
def test_loc_setitem_consistency_slice_column_len(self):
# .loc[:,column] setting with slice == len of the column
# GH10408
data = """Level_0,,,Respondent,Respondent,Respondent,OtherCat,OtherCat
Level_1,,,Something,StartDate,EndDate,Yes/No,SomethingElse
Region,Site,RespondentID,,,,,
Region_1,Site_1,3987227376,A,5/25/2015 10:59,5/25/2015 11:22,Yes,
Region_1,Site_1,3980680971,A,5/21/2015 9:40,5/21/2015 9:52,Yes,Yes
Region_1,Site_2,3977723249,A,5/20/2015 8:27,5/20/2015 8:41,Yes,
Region_1,Site_2,3977723089,A,5/20/2015 8:33,5/20/2015 9:09,Yes,No"""
df = pd.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1, 2])
df.loc[:, ('Respondent', 'StartDate')] = pd.to_datetime(df.loc[:, (
'Respondent', 'StartDate')])
df.loc[:, ('Respondent', 'EndDate')] = pd.to_datetime(df.loc[:, (
'Respondent', 'EndDate')])
df.loc[:, ('Respondent', 'Duration')] = df.loc[:, (
'Respondent', 'EndDate')] - df.loc[:, ('Respondent', 'StartDate')]
df.loc[:, ('Respondent', 'Duration')] = df.loc[:, (
'Respondent', 'Duration')].astype('timedelta64[s]')
expected = Series([1380, 720, 840, 2160.], index=df.index,
name=('Respondent', 'Duration'))
tm.assert_series_equal(df[('Respondent', 'Duration')], expected)
def test_loc_setitem_frame(self):
df = self.frame_labels
result = df.iloc[0, 0]
df.loc['a', 'A'] = 1
result = df.loc['a', 'A']
self.assertEqual(result, 1)
result = df.iloc[0, 0]
self.assertEqual(result, 1)
df.loc[:, 'B':'D'] = 0
expected = df.loc[:, 'B':'D']
with catch_warnings(record=True):
result = df.ix[:, 1:]
tm.assert_frame_equal(result, expected)
# GH 6254
# setting issue
df = DataFrame(index=[3, 5, 4], columns=['A'])
df.loc[[4, 3, 5], 'A'] = np.array([1, 2, 3], dtype='int64')
expected = DataFrame(dict(A=Series(
[1, 2, 3], index=[4, 3, 5]))).reindex(index=[3, 5, 4])
tm.assert_frame_equal(df, expected)
# GH 6252
# setting with an empty frame
keys1 = ['@' + str(i) for i in range(5)]
val1 = np.arange(5, dtype='int64')
keys2 = ['@' + str(i) for i in range(4)]
val2 = np.arange(4, dtype='int64')
index = list(set(keys1).union(keys2))
df = DataFrame(index=index)
df['A'] = nan
df.loc[keys1, 'A'] = val1
df['B'] = nan
df.loc[keys2, 'B'] = val2
expected = DataFrame(dict(A=Series(val1, index=keys1), B=Series(
val2, index=keys2))).reindex(index=index)
tm.assert_frame_equal(df, expected)
# GH 8669
# invalid coercion of nan -> int
df = DataFrame({'A': [1, 2, 3], 'B': np.nan})
df.loc[df.B > df.A, 'B'] = df.A
expected = DataFrame({'A': [1, 2, 3], 'B': np.nan})
tm.assert_frame_equal(df, expected)
# GH 6546
# setting with mixed labels
df = DataFrame({1: [1, 2], 2: [3, 4], 'a': ['a', 'b']})
result = df.loc[0, [1, 2]]
expected = Series([1, 3], index=[1, 2], dtype=object, name=0)
tm.assert_series_equal(result, expected)
expected = DataFrame({1: [5, 2], 2: [6, 4], 'a': ['a', 'b']})
df.loc[0, [1, 2]] = [5, 6]
tm.assert_frame_equal(df, expected)
def test_loc_setitem_frame_multiples(self):
# multiple setting
df = DataFrame({'A': ['foo', 'bar', 'baz'],
'B': Series(
range(3), dtype=np.int64)})
rhs = df.loc[1:2]
rhs.index = df.index[0:2]
df.loc[0:1] = rhs
expected = DataFrame({'A': ['bar', 'baz', 'baz'],
'B': Series(
[1, 2, 2], dtype=np.int64)})
tm.assert_frame_equal(df, expected)
# multiple setting with frame on rhs (with M8)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(
range(5), dtype=np.int64)})
expected = DataFrame({'date': [Timestamp('20000101'), Timestamp(
'20000102'), Timestamp('20000101'), Timestamp('20000102'),
Timestamp('20000103')],
'val': Series(
[0, 1, 0, 1, 2], dtype=np.int64)})
rhs = df.loc[0:2]
rhs.index = df.index[2:5]
df.loc[2:4] = rhs
tm.assert_frame_equal(df, expected)
def test_iloc_getitem_frame(self):
df = DataFrame(np.random.randn(10, 4), index=lrange(0, 20, 2),
columns=lrange(0, 8, 2))
result = df.iloc[2]
with catch_warnings(record=True):
exp = df.ix[4]
tm.assert_series_equal(result, exp)
result = df.iloc[2, 2]
with catch_warnings(record=True):
exp = df.ix[4, 4]
self.assertEqual(result, exp)
# slice
result = df.iloc[4:8]
with catch_warnings(record=True):
expected = df.ix[8:14]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 2:3]
with catch_warnings(record=True):
expected = df.ix[:, 4:5]
tm.assert_frame_equal(result, expected)
# list of integers
result = df.iloc[[0, 1, 3]]
with catch_warnings(record=True):
expected = df.ix[[0, 2, 6]]
tm.assert_frame_equal(result, expected)
result = df.iloc[[0, 1, 3], [0, 1]]
with catch_warnings(record=True):
expected = df.ix[[0, 2, 6], [0, 2]]
tm.assert_frame_equal(result, expected)
# negative indices
result = df.iloc[[-1, 1, 3], [-1, 1]]
with catch_warnings(record=True):
expected = df.ix[[18, 2, 6], [6, 2]]
tm.assert_frame_equal(result, expected)
# duplicate indices
result = df.iloc[[-1, -1, 1, 3], [-1, 1]]
with catch_warnings(record=True):
expected = df.ix[[18, 18, 2, 6], [6, 2]]
tm.assert_frame_equal(result, expected)
# with index-like
s = Series(index=lrange(1, 5))
result = df.iloc[s.index]
with catch_warnings(record=True):
expected = df.ix[[2, 4, 6, 8]]
tm.assert_frame_equal(result, expected)
def test_iloc_getitem_labelled_frame(self):
# try with labelled frame
df = DataFrame(np.random.randn(10, 4),
index=list('abcdefghij'), columns=list('ABCD'))
result = df.iloc[1, 1]
exp = df.loc['b', 'B']
self.assertEqual(result, exp)
result = df.iloc[:, 2:3]
expected = df.loc[:, ['C']]
tm.assert_frame_equal(result, expected)
# negative indexing
result = df.iloc[-1, -1]
exp = df.loc['j', 'D']
self.assertEqual(result, exp)
# out-of-bounds exception
self.assertRaises(IndexError, df.iloc.__getitem__, tuple([10, 5]))
# trying to use a label
self.assertRaises(ValueError, df.iloc.__getitem__, tuple(['j', 'D']))
def test_iloc_getitem_doc_issue(self):
# multi axis slicing issue with single block
# surfaced in GH 6059
arr = np.random.randn(6, 4)
index = date_range('20130101', periods=6)
columns = list('ABCD')
df = DataFrame(arr, index=index, columns=columns)
# defines ref_locs
df.describe()
result = df.iloc[3:5, 0:2]
str(result)
result.dtypes
expected = DataFrame(arr[3:5, 0:2], index=index[3:5],
columns=columns[0:2])
tm.assert_frame_equal(result, expected)
# for dups
df.columns = list('aaaa')
result = df.iloc[3:5, 0:2]
str(result)
result.dtypes
expected = DataFrame(arr[3:5, 0:2], index=index[3:5],
columns=list('aa'))
tm.assert_frame_equal(result, expected)
# related
arr = np.random.randn(6, 4)
index = list(range(0, 12, 2))
columns = list(range(0, 8, 2))
df = DataFrame(arr, index=index, columns=columns)
df._data.blocks[0].mgr_locs
result = df.iloc[1:5, 2:4]
str(result)
result.dtypes
expected = DataFrame(arr[1:5, 2:4], index=index[1:5],
columns=columns[2:4])
tm.assert_frame_equal(result, expected)
def test_setitem_ndarray_1d(self):
# GH5508
# len of indexer vs length of the 1d ndarray
df = DataFrame(index=Index(lrange(1, 11)))
df['foo'] = np.zeros(10, dtype=np.float64)
df['bar'] = np.zeros(10, dtype=np.complex)
# invalid
def f():
with catch_warnings(record=True):
df.ix[2:5, 'bar'] = np.array([2.33j, 1.23 + 0.1j, 2.2])
self.assertRaises(ValueError, f)
def f():
df.loc[df.index[2:5], 'bar'] = np.array([2.33j, 1.23 + 0.1j,
2.2, 1.0])
self.assertRaises(ValueError, f)
# valid
df.loc[df.index[2:6], 'bar'] = np.array([2.33j, 1.23 + 0.1j,
2.2, 1.0])
result = df.loc[df.index[2:6], 'bar']
expected = Series([2.33j, 1.23 + 0.1j, 2.2, 1.0], index=[3, 4, 5, 6],
name='bar')
tm.assert_series_equal(result, expected)
# dtype getting changed?
df = DataFrame(index=Index(lrange(1, 11)))
df['foo'] = np.zeros(10, dtype=np.float64)
df['bar'] = np.zeros(10, dtype=np.complex)
def f():
df[2:5] = np.arange(1, 4) * 1j
self.assertRaises(ValueError, f)
def test_iloc_setitem_series(self):
df = DataFrame(np.random.randn(10, 4), index=list('abcdefghij'),
columns=list('ABCD'))
df.iloc[1, 1] = 1
result = df.iloc[1, 1]
self.assertEqual(result, 1)
df.iloc[:, 2:3] = 0
expected = df.iloc[:, 2:3]
result = df.iloc[:, 2:3]
tm.assert_frame_equal(result, expected)
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
s.iloc[1] = 1
result = s.iloc[1]
self.assertEqual(result, 1)
s.iloc[:4] = 0
expected = s.iloc[:4]
result = s.iloc[:4]
tm.assert_series_equal(result, expected)
s = Series([-1] * 6)
s.iloc[0::2] = [0, 2, 4]
s.iloc[1::2] = [1, 3, 5]
result = s
expected = Series([0, 1, 2, 3, 4, 5])
tm.assert_series_equal(result, expected)
def test_iloc_setitem_list_of_lists(self):
# GH 7551
# list-of-list is set incorrectly in mixed vs. single dtyped frames
df = DataFrame(dict(A=np.arange(5, dtype='int64'),
B=np.arange(5, 10, dtype='int64')))
df.iloc[2:4] = [[10, 11], [12, 13]]
expected = DataFrame(dict(A=[0, 1, 10, 12, 4], B=[5, 6, 11, 13, 9]))
tm.assert_frame_equal(df, expected)
df = DataFrame(
dict(A=list('abcde'), B=np.arange(5, 10, dtype='int64')))
df.iloc[2:4] = [['x', 11], ['y', 13]]
expected = DataFrame(dict(A=['a', 'b', 'x', 'y', 'e'],
B=[5, 6, 11, 13, 9]))
tm.assert_frame_equal(df, expected)
def test_ix_general(self):
# ix general issues
# GH 2817
data = {'amount': {0: 700, 1: 600, 2: 222, 3: 333, 4: 444},
'col': {0: 3.5, 1: 3.5, 2: 4.0, 3: 4.0, 4: 4.0},
'year': {0: 2012, 1: 2011, 2: 2012, 3: 2012, 4: 2012}}
df = DataFrame(data).set_index(keys=['col', 'year'])
key = 4.0, 2012
# emits a PerformanceWarning, ok
with self.assert_produces_warning(PerformanceWarning):
tm.assert_frame_equal(df.loc[key], df.iloc[2:])
# this is ok
df.sort_index(inplace=True)
res = df.loc[key]
# col has float dtype, result should be Float64Index
index = MultiIndex.from_arrays([[4.] * 3, [2012] * 3],
names=['col', 'year'])
expected = DataFrame({'amount': [222, 333, 444]}, index=index)
tm.assert_frame_equal(res, expected)
def test_ix_weird_slicing(self):
# http://stackoverflow.com/q/17056560/1240268
df = DataFrame({'one': [1, 2, 3, np.nan, np.nan],
'two': [1, 2, 3, 4, 5]})
df.loc[df['one'] > 1, 'two'] = -df['two']
expected = DataFrame({'one': {0: 1.0,
1: 2.0,
2: 3.0,
3: nan,
4: nan},
'two': {0: 1,
1: -2,
2: -3,
3: 4,
4: 5}})
tm.assert_frame_equal(df, expected)
def test_loc_coerceion(self):
# 12411
df = DataFrame({'date': [pd.Timestamp('20130101').tz_localize('UTC'),
pd.NaT]})
expected = df.dtypes
result = df.iloc[[0]]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[[1]]
tm.assert_series_equal(result.dtypes, expected)
# 12045
import datetime
df = DataFrame({'date': [datetime.datetime(2012, 1, 1),
datetime.datetime(1012, 1, 2)]})
expected = df.dtypes
result = df.iloc[[0]]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[[1]]
tm.assert_series_equal(result.dtypes, expected)
# 11594
df = DataFrame({'text': ['some words'] + [None] * 9})
expected = df.dtypes
result = df.iloc[0:2]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[3:]
tm.assert_series_equal(result.dtypes, expected)
def test_setitem_dtype_upcast(self):
# GH3216
df = DataFrame([{"a": 1}, {"a": 3, "b": 2}])
df['c'] = np.nan
self.assertEqual(df['c'].dtype, np.float64)
df.loc[0, 'c'] = 'foo'
expected = DataFrame([{"a": 1, "c": 'foo'},
{"a": 3, "b": 2, "c": np.nan}])
tm.assert_frame_equal(df, expected)
# GH10280
df = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=list('ab'),
columns=['foo', 'bar', 'baz'])
for val in [3.14, 'wxyz']:
left = df.copy()
left.loc['a', 'bar'] = val
right = DataFrame([[0, val, 2], [3, 4, 5]], index=list('ab'),
columns=['foo', 'bar', 'baz'])
tm.assert_frame_equal(left, right)
self.assertTrue(is_integer_dtype(left['foo']))
self.assertTrue(is_integer_dtype(left['baz']))
left = DataFrame(np.arange(6, dtype='int64').reshape(2, 3) / 10.0,
index=list('ab'),
columns=['foo', 'bar', 'baz'])
left.loc['a', 'bar'] = 'wxyz'
right = DataFrame([[0, 'wxyz', .2], [.3, .4, .5]], index=list('ab'),
columns=['foo', 'bar', 'baz'])
tm.assert_frame_equal(left, right)
self.assertTrue(is_float_dtype(left['foo']))
self.assertTrue(is_float_dtype(left['baz']))
def test_setitem_iloc(self):
# setitem with an iloc list
df = DataFrame(np.arange(9).reshape((3, 3)), index=["A", "B", "C"],
columns=["A", "B", "C"])
df.iloc[[0, 1], [1, 2]]
df.iloc[[0, 1], [1, 2]] += 100
expected = DataFrame(
np.array([0, 101, 102, 3, 104, 105, 6, 7, 8]).reshape((3, 3)),
index=["A", "B", "C"], columns=["A", "B", "C"])
tm.assert_frame_equal(df, expected)
def test_dups_fancy_indexing(self):
# GH 3455
from pandas.util.testing import makeCustomDataframe as mkdf
df = mkdf(10, 3)
df.columns = ['a', 'a', 'b']
result = df[['b', 'a']].columns
expected = Index(['b', 'a', 'a'])
self.assert_index_equal(result, expected)
# across dtypes
df = DataFrame([[1, 2, 1., 2., 3., 'foo', 'bar']],
columns=list('aaaaaaa'))
df.head()
str(df)
result = DataFrame([[1, 2, 1., 2., 3., 'foo', 'bar']])
result.columns = list('aaaaaaa')
# TODO(wesm): unused?
df_v = df.iloc[:, 4] # noqa
res_v = result.iloc[:, 4] # noqa
tm.assert_frame_equal(df, result)
# GH 3561, dups not in selected order
df = DataFrame(
{'test': [5, 7, 9, 11],
'test1': [4., 5, 6, 7],
'other': list('abcd')}, index=['A', 'A', 'B', 'C'])
rows = ['C', 'B']
expected = DataFrame(
{'test': [11, 9],
'test1': [7., 6],
'other': ['d', 'c']}, index=rows)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
result = df.loc[Index(rows)]
tm.assert_frame_equal(result, expected)
rows = ['C', 'B', 'E']
expected = DataFrame(
{'test': [11, 9, np.nan],
'test1': [7., 6, np.nan],
'other': ['d', 'c', np.nan]}, index=rows)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
# see GH5553, make sure we use the right indexer
rows = ['F', 'G', 'H', 'C', 'B', 'E']
expected = DataFrame({'test': [np.nan, np.nan, np.nan, 11, 9, np.nan],
'test1': [np.nan, np.nan, np.nan, 7., 6, np.nan],
'other': [np.nan, np.nan, np.nan,
'd', 'c', np.nan]},
index=rows)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
# inconsistent returns for unique/duplicate indices when values are
# missing
df = DataFrame(randn(4, 3), index=list('ABCD'))
expected = df.ix[['E']]
dfnu = DataFrame(randn(5, 3), index=list('AABCD'))
result = dfnu.ix[['E']]
tm.assert_frame_equal(result, expected)
# ToDo: check_index_type can be True after GH 11497
# GH 4619; duplicate indexer with missing label
df = DataFrame({"A": [0, 1, 2]})
result = df.ix[[0, 8, 0]]
expected = DataFrame({"A": [0, np.nan, 0]}, index=[0, 8, 0])
tm.assert_frame_equal(result, expected, check_index_type=False)
df = DataFrame({"A": list('abc')})
result = df.ix[[0, 8, 0]]
expected = DataFrame({"A": ['a', np.nan, 'a']}, index=[0, 8, 0])
tm.assert_frame_equal(result, expected, check_index_type=False)
# non unique with non unique selector
df = DataFrame({'test': [5, 7, 9, 11]}, index=['A', 'A', 'B', 'C'])
expected = DataFrame(
{'test': [5, 7, 5, 7, np.nan]}, index=['A', 'A', 'A', 'A', 'E'])
result = df.ix[['A', 'A', 'E']]
tm.assert_frame_equal(result, expected)
# GH 5835
# dups on index and missing values
df = DataFrame(
np.random.randn(5, 5), columns=['A', 'B', 'B', 'B', 'A'])
expected = pd.concat(
[df.ix[:, ['A', 'B']], DataFrame(np.nan, columns=['C'],
index=df.index)], axis=1)
result = df.ix[:, ['A', 'B', 'C']]
tm.assert_frame_equal(result, expected)
# GH 6504, multi-axis indexing
df = DataFrame(np.random.randn(9, 2),
index=[1, 1, 1, 2, 2, 2, 3, 3, 3], columns=['a', 'b'])
expected = df.iloc[0:6]
result = df.loc[[1, 2]]
tm.assert_frame_equal(result, expected)
expected = df
result = df.loc[:, ['a', 'b']]
tm.assert_frame_equal(result, expected)
expected = df.iloc[0:6, :]
result = df.loc[[1, 2], ['a', 'b']]
tm.assert_frame_equal(result, expected)
def test_indexing_mixed_frame_bug(self):
# GH3492
df = DataFrame({'a': {1: 'aaa', 2: 'bbb', 3: 'ccc'},
'b': {1: 111, 2: 222, 3: 333}})
# this works, new column is created correctly
df['test'] = df['a'].apply(lambda x: '_' if x == 'aaa' else x)
# this does not work, ie column test is not changed
idx = df['test'] == '_'
temp = df.ix[idx, 'a'].apply(lambda x: '-----' if x == 'aaa' else x)
df.ix[idx, 'test'] = temp
self.assertEqual(df.iloc[0, 2], '-----')
# if I look at df, then element [0,2] equals '_'. If instead I type
# df.ix[idx,'test'], I get '-----', finally by typing df.iloc[0,2] I
# get '_'.
def test_multitype_list_index_access(self):
# GH 10610
df = pd.DataFrame(np.random.random((10, 5)),
columns=["a"] + [20, 21, 22, 23])
with self.assertRaises(KeyError):
df[[22, 26, -8]]
self.assertEqual(df[21].shape[0], df.shape[0])
def test_set_index_nan(self):
# GH 3586
df = DataFrame({'PRuid': {17: 'nonQC',
18: 'nonQC',
19: 'nonQC',
20: '10',
21: '11',
22: '12',
23: '13',
24: '24',
25: '35',
26: '46',
27: '47',
28: '48',
29: '59',
30: '10'},
'QC': {17: 0.0,
18: 0.0,
19: 0.0,
20: nan,
21: nan,
22: nan,
23: nan,
24: 1.0,
25: nan,
26: nan,
27: nan,
28: nan,
29: nan,
30: nan},
'data': {17: 7.9544899999999998,
18: 8.0142609999999994,
19: 7.8591520000000008,
20: 0.86140349999999999,
21: 0.87853110000000001,
22: 0.8427041999999999,
23: 0.78587700000000005,
24: 0.73062459999999996,
25: 0.81668560000000001,
26: 0.81927080000000008,
27: 0.80705009999999999,
28: 0.81440240000000008,
29: 0.80140849999999997,
30: 0.81307740000000006},
'year': {17: 2006,
18: 2007,
19: 2008,
20: 1985,
21: 1985,
22: 1985,
23: 1985,
24: 1985,
25: 1985,
26: 1985,
27: 1985,
28: 1985,
29: 1985,
30: 1986}}).reset_index()
result = df.set_index(['year', 'PRuid', 'QC']).reset_index().reindex(
columns=df.columns)
tm.assert_frame_equal(result, df)
def test_multi_nan_indexing(self):
# GH 3588
df = DataFrame({"a": ['R1', 'R2', np.nan, 'R4'],
'b': ["C1", "C2", "C3", "C4"],
"c": [10, 15, np.nan, 20]})
result = df.set_index(['a', 'b'], drop=False)
expected = DataFrame({"a": ['R1', 'R2', np.nan, 'R4'],
'b': ["C1", "C2", "C3", "C4"],
"c": [10, 15, np.nan, 20]},
index=[Index(['R1', 'R2', np.nan, 'R4'],
name='a'),
Index(['C1', 'C2', 'C3', 'C4'], name='b')])
tm.assert_frame_equal(result, expected)
def test_multi_assign(self):
# GH 3626, an assignment of a sub-df to a df
df = DataFrame({'FC': ['a', 'b', 'a', 'b', 'a', 'b'],
'PF': [0, 0, 0, 0, 1, 1],
'col1': lrange(6),
'col2': lrange(6, 12)})
df.ix[1, 0] = np.nan
df2 = df.copy()
mask = ~df2.FC.isnull()
cols = ['col1', 'col2']
dft = df2 * 2
dft.ix[3, 3] = np.nan
expected = DataFrame({'FC': ['a', np.nan, 'a', 'b', 'a', 'b'],
'PF': [0, 0, 0, 0, 1, 1],
'col1': Series([0, 1, 4, 6, 8, 10]),
'col2': [12, 7, 16, np.nan, 20, 22]})
# frame on rhs
df2.ix[mask, cols] = dft.ix[mask, cols]
tm.assert_frame_equal(df2, expected)
df2.ix[mask, cols] = dft.ix[mask, cols]
tm.assert_frame_equal(df2, expected)
# with an ndarray on rhs
df2 = df.copy()
df2.ix[mask, cols] = dft.ix[mask, cols].values
tm.assert_frame_equal(df2, expected)
df2.ix[mask, cols] = dft.ix[mask, cols].values
tm.assert_frame_equal(df2, expected)
# broadcasting on the rhs is required
df = DataFrame(dict(A=[1, 2, 0, 0, 0], B=[0, 0, 0, 10, 11], C=[
0, 0, 0, 10, 11], D=[3, 4, 5, 6, 7]))
expected = df.copy()
mask = expected['A'] == 0
for col in ['A', 'B']:
expected.loc[mask, col] = df['D']
df.loc[df['A'] == 0, ['A', 'B']] = df['D']
tm.assert_frame_equal(df, expected)
def test_ix_assign_column_mixed(self):
# GH #1142
df = DataFrame(tm.getSeriesData())
df['foo'] = 'bar'
orig = df.ix[:, 'B'].copy()
df.ix[:, 'B'] = df.ix[:, 'B'] + 1
tm.assert_series_equal(df.B, orig + 1)
# GH 3668, mixed frame with series value
df = DataFrame({'x': lrange(10), 'y': lrange(10, 20), 'z': 'bar'})
expected = df.copy()
for i in range(5):
indexer = i * 2
v = 1000 + i * 200
expected.ix[indexer, 'y'] = v
self.assertEqual(expected.ix[indexer, 'y'], v)
df.ix[df.x % 2 == 0, 'y'] = df.ix[df.x % 2 == 0, 'y'] * 100
tm.assert_frame_equal(df, expected)
# GH 4508, making sure consistency of assignments
df = DataFrame({'a': [1, 2, 3], 'b': [0, 1, 2]})
df.ix[[0, 2, ], 'b'] = [100, -100]
expected = DataFrame({'a': [1, 2, 3], 'b': [100, 1, -100]})
tm.assert_frame_equal(df, expected)
df = pd.DataFrame({'a': lrange(4)})
df['b'] = np.nan
df.ix[[1, 3], 'b'] = [100, -100]
expected = DataFrame({'a': [0, 1, 2, 3],
'b': [np.nan, 100, np.nan, -100]})
tm.assert_frame_equal(df, expected)
# ok, but chained assignments are dangerous
# if we turn off chained assignment it will work
with option_context('chained_assignment', None):
df = pd.DataFrame({'a': lrange(4)})
df['b'] = np.nan
df['b'].ix[[1, 3]] = [100, -100]
tm.assert_frame_equal(df, expected)
def test_ix_get_set_consistency(self):
# GH 4544
# ix/loc get/set not consistent when
# a mixed int/string index
df = DataFrame(np.arange(16).reshape((4, 4)),
columns=['a', 'b', 8, 'c'],
index=['e', 7, 'f', 'g'])
self.assertEqual(df.ix['e', 8], 2)
self.assertEqual(df.loc['e', 8], 2)
df.ix['e', 8] = 42
self.assertEqual(df.ix['e', 8], 42)
self.assertEqual(df.loc['e', 8], 42)
df.loc['e', 8] = 45
self.assertEqual(df.ix['e', 8], 45)
self.assertEqual(df.loc['e', 8], 45)
def test_setitem_list(self):
# GH 6043
# ix with a list
df = DataFrame(index=[0, 1], columns=[0])
df.ix[1, 0] = [1, 2, 3]
df.ix[1, 0] = [1, 2]
result = DataFrame(index=[0, 1], columns=[0])
result.ix[1, 0] = [1, 2]
tm.assert_frame_equal(result, df)
# ix with an object
class TO(object):
def __init__(self, value):
self.value = value
def __str__(self):
return "[{0}]".format(self.value)
__repr__ = __str__
def __eq__(self, other):
return self.value == other.value
def view(self):
return self
df = DataFrame(index=[0, 1], columns=[0])
df.ix[1, 0] = TO(1)
df.ix[1, 0] = TO(2)
result = DataFrame(index=[0, 1], columns=[0])
result.ix[1, 0] = TO(2)
tm.assert_frame_equal(result, df)
# remains object dtype even after setting it back
df = DataFrame(index=[0, 1], columns=[0])
df.ix[1, 0] = TO(1)
df.ix[1, 0] = np.nan
result = DataFrame(index=[0, 1], columns=[0])
tm.assert_frame_equal(result, df)
def test_iloc_mask(self):
# GH 3631, iloc with a mask (of a series) should raise
df = DataFrame(lrange(5), list('ABCDE'), columns=['a'])
mask = (df.a % 2 == 0)
self.assertRaises(ValueError, df.iloc.__getitem__, tuple([mask]))
mask.index = lrange(len(mask))
self.assertRaises(NotImplementedError, df.iloc.__getitem__,
tuple([mask]))
# ndarray ok
result = df.iloc[np.array([True] * len(mask), dtype=bool)]
tm.assert_frame_equal(result, df)
# the possibilities
locs = np.arange(4)
nums = 2 ** locs
reps = lmap(bin, nums)
df = DataFrame({'locs': locs, 'nums': nums}, reps)
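# expected output (or error-message fragment) keyed by (mask index, accessor)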
expected = {
(None, ''): '0b1100',
(None, '.loc'): '0b1100',
(None, '.iloc'): '0b1100',
('index', ''): '0b11',
('index', '.loc'): '0b11',
('index', '.iloc'): ('iLocation based boolean indexing '
'cannot use an indexable as a mask'),
('locs', ''): 'Unalignable boolean Series provided as indexer '
'(index of the boolean Series and of the indexed '
'object do not match',
('locs', '.loc'): 'Unalignable boolean Series provided as indexer '
'(index of the boolean Series and of the '
'indexed object do not match',
('locs', '.iloc'): ('iLocation based boolean indexing on an '
'integer type is not available'),
}
# UserWarnings from reindex of a boolean mask
with warnings.catch_warnings(record=True):
result = dict()
for idx in [None, 'index', 'locs']:
mask = (df.nums > 2).values
if idx:
mask = Series(mask, list(reversed(getattr(df, idx))))
for method in ['', '.loc', '.iloc']:
try:
if method:
accessor = getattr(df, method[1:])
else:
accessor = df
ans = str(bin(accessor[mask]['nums'].sum()))
except Exception as e:
ans = str(e)
key = tuple([idx, method])
r = expected.get(key)
if r != ans:
raise AssertionError(
"[%s] does not match [%s], received [%s]"
% (key, ans, r))
def test_ix_slicing_strings(self):
# GH3836
data = {'Classification':
['SA EQUITY CFD', 'bbb', 'SA EQUITY', 'SA SSF', 'aaa'],
'Random': [1, 2, 3, 4, 5],
'X': ['correct', 'wrong', 'correct', 'correct', 'wrong']}
df = DataFrame(data)
x = df[~df.Classification.isin(['SA EQUITY CFD', 'SA EQUITY', 'SA SSF'
])]
df.ix[x.index, 'X'] = df['Classification']
expected = DataFrame({'Classification': {0: 'SA EQUITY CFD',
1: 'bbb',
2: 'SA EQUITY',
3: 'SA SSF',
4: 'aaa'},
'Random': {0: 1,
1: 2,
2: 3,
3: 4,
4: 5},
'X': {0: 'correct',
1: 'bbb',
2: 'correct',
3: 'correct',
4: 'aaa'}}) # bug was 4: 'bbb'
tm.assert_frame_equal(df, expected)
def test_non_unique_loc(self):
# GH3659
# non-unique indexer with loc slice
# https://groups.google.com/forum/?fromgroups#!topic/pydata/zTm2No0crYs
# these are going to raise because we are non-monotonic
df = DataFrame({'A': [1, 2, 3, 4, 5, 6],
'B': [3, 4, 5, 6, 7, 8]}, index=[0, 1, 0, 1, 2, 3])
self.assertRaises(KeyError, df.loc.__getitem__,
tuple([slice(1, None)]))
self.assertRaises(KeyError, df.loc.__getitem__,
tuple([slice(0, None)]))
self.assertRaises(KeyError, df.loc.__getitem__, tuple([slice(1, 2)]))
# monotonic are ok
df = DataFrame({'A': [1, 2, 3, 4, 5, 6],
'B': [3, 4, 5, 6, 7, 8]},
index=[0, 1, 0, 1, 2, 3]).sort_index(axis=0)
result = df.loc[1:]
expected = DataFrame({'A': [2, 4, 5, 6], 'B': [4, 6, 7, 8]},
index=[1, 1, 2, 3])
tm.assert_frame_equal(result, expected)
result = df.loc[0:]
tm.assert_frame_equal(result, df)
result = df.loc[1:2]
expected = DataFrame({'A': [2, 4, 5], 'B': [4, 6, 7]},
index=[1, 1, 2])
tm.assert_frame_equal(result, expected)
def test_loc_name(self):
# GH 3880
df = DataFrame([[1, 1], [1, 1]])
df.index.name = 'index_name'
result = df.iloc[[0, 1]].index.name
self.assertEqual(result, 'index_name')
result = df.ix[[0, 1]].index.name
self.assertEqual(result, 'index_name')
result = df.loc[[0, 1]].index.name
self.assertEqual(result, 'index_name')
def test_iloc_non_unique_indexing(self):
# GH 4017, non-unique indexing (on the axis)
df = DataFrame({'A': [0.1] * 3000, 'B': [1] * 3000})
idx = np.array(lrange(30)) * 99
expected = df.iloc[idx]
df3 = pd.concat([df, 2 * df, 3 * df])
result = df3.iloc[idx]
tm.assert_frame_equal(result, expected)
df2 = DataFrame({'A': [0.1] * 1000, 'B': [1] * 1000})
df2 = pd.concat([df2, 2 * df2, 3 * df2])
sidx = df2.index.to_series()
expected = df2.iloc[idx[idx <= sidx.max()]]
new_list = []
for r, s in expected.iterrows():
new_list.append(s)
new_list.append(s * 2)
new_list.append(s * 3)
expected = DataFrame(new_list)
expected = pd.concat([expected, DataFrame(index=idx[idx > sidx.max()])
])
result = df2.loc[idx]
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_string_slice(self):
# GH 14424
# string indexing against datetimelike with object
# dtype should properly raise KeyError
df = pd.DataFrame([1], pd.Index([pd.Timestamp('2011-01-01')],
dtype=object))
self.assertTrue(df.index.is_all_dates)
with tm.assertRaises(KeyError):
df['2011']
with tm.assertRaises(KeyError):
df.loc['2011', 0]
df = pd.DataFrame()
self.assertFalse(df.index.is_all_dates)
with tm.assertRaises(KeyError):
df['2011']
with tm.assertRaises(KeyError):
df.loc['2011', 0]
def test_mi_access(self):
# GH 4145
data = """h1 main h3 sub h5
0 a A 1 A1 1
1 b B 2 B1 2
2 c B 3 A1 3
3 d A 4 B2 4
4 e A 5 B2 5
5 f B 6 A2 6
"""
df = pd.read_csv(StringIO(data), sep=r'\s+', index_col=0)
df2 = df.set_index(['main', 'sub']).T.sort_index(1)
index = Index(['h1', 'h3', 'h5'])
columns = MultiIndex.from_tuples([('A', 'A1')], names=['main', 'sub'])
expected = DataFrame([['a', 1, 1]], index=columns, columns=index).T
result = df2.loc[:, ('A', 'A1')]
tm.assert_frame_equal(result, expected)
result = df2[('A', 'A1')]
tm.assert_frame_equal(result, expected)
# GH 4146, not returning a block manager when selecting a unique index
# from a duplicate index
# as of 4879, this returns a Series (which is similar to what happens
# with a non-unique)
expected = Series(['a', 1, 1], index=['h1', 'h3', 'h5'], name='A1')
result = df2['A']['A1']
tm.assert_series_equal(result, expected)
# selecting a non_unique from the 2nd level
expected = DataFrame([['d', 4, 4], ['e', 5, 5]],
index=Index(['B2', 'B2'], name='sub'),
columns=['h1', 'h3', 'h5'], ).T
result = df2['A']['B2']
tm.assert_frame_equal(result, expected)
def test_non_unique_loc_memory_error(self):
# GH 4280
# non_unique index with a large selection triggers a memory error
columns = list('ABCDEFG')
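# gen_test concatenates a unique-range index with a large block of rows duplicated at index 0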
def gen_test(l, l2):
return pd.concat([DataFrame(randn(l, len(columns)),
index=lrange(l), columns=columns),
DataFrame(np.ones((l2, len(columns))),
index=[0] * l2, columns=columns)])
def gen_expected(df, mask):
l = len(mask)
return pd.concat([df.take([0], convert=False),
DataFrame(np.ones((l, len(columns))),
index=[0] * l,
columns=columns),
df.take(mask[1:], convert=False)])
df = gen_test(900, 100)
self.assertFalse(df.index.is_unique)
mask = np.arange(100)
result = df.loc[mask]
expected = gen_expected(df, mask)
tm.assert_frame_equal(result, expected)
df = gen_test(900000, 100000)
self.assertFalse(df.index.is_unique)
mask = np.arange(100000)
result = df.loc[mask]
expected = gen_expected(df, mask)
tm.assert_frame_equal(result, expected)
def test_astype_assignment(self):
# GH4312 (iloc)
df_orig = DataFrame([['1', '2', '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
df = df_orig.copy()
df.iloc[:, 0:2] = df.iloc[:, 0:2].astype(np.int64)
expected = DataFrame([[1, 2, '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.iloc[:, 0:2] = df.iloc[:, 0:2]._convert(datetime=True, numeric=True)
expected = DataFrame([[1, 2, '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
# GH5702 (loc)
df = df_orig.copy()
df.loc[:, 'A'] = df.loc[:, 'A'].astype(np.int64)
expected = DataFrame([[1, '2', '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc[:, ['B', 'C']] = df.loc[:, ['B', 'C']].astype(np.int64)
expected = DataFrame([['1', 2, 3, '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
# full replacements / no nans
df = DataFrame({'A': [1., 2., 3., 4.]})
df.iloc[:, 0] = df['A'].astype(np.int64)
expected = DataFrame({'A': [1, 2, 3, 4]})
tm.assert_frame_equal(df, expected)
df = DataFrame({'A': [1., 2., 3., 4.]})
df.loc[:, 'A'] = df['A'].astype(np.int64)
expected = DataFrame({'A': [1, 2, 3, 4]})
tm.assert_frame_equal(df, expected)
def test_astype_assignment_with_dups(self):
# GH 4686
# assignment with dups that has a dtype change
cols = pd.MultiIndex.from_tuples([('A', '1'), ('B', '1'), ('A', '2')])
df = DataFrame(np.arange(3).reshape((1, 3)),
columns=cols, dtype=object)
index = df.index.copy()
df['A'] = df['A'].astype(np.float64)
self.assert_index_equal(df.index, index)
# TODO(wesm): unused variables
# result = df.get_dtype_counts().sort_index()
# expected = Series({'float64': 2, 'object': 1}).sort_index()
def test_dups_loc(self):
# GH4726
# dup indexing with iloc/loc
df = DataFrame([[1, 2, 'foo', 'bar', Timestamp('20130101')]],
columns=['a', 'a', 'a', 'a', 'a'], index=[1])
expected = Series([1, 2, 'foo', 'bar', Timestamp('20130101')],
index=['a', 'a', 'a', 'a', 'a'], name=1)
result = df.iloc[0]
tm.assert_series_equal(result, expected)
result = df.loc[1]
tm.assert_series_equal(result, expected)
def test_partial_setting(self):
# GH2578, allow ix and friends to partially set
# series
s_orig = Series([1, 2, 3])
s = s_orig.copy()
s[5] = 5
expected = Series([1, 2, 3, 5], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
s = s_orig.copy()
s.loc[5] = 5
expected = Series([1, 2, 3, 5], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
s = s_orig.copy()
s[5] = 5.
expected = Series([1, 2, 3, 5.], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
s = s_orig.copy()
s.loc[5] = 5.
expected = Series([1, 2, 3, 5.], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
# iloc/iat raise
s = s_orig.copy()
def f():
s.iloc[3] = 5.
self.assertRaises(IndexError, f)
def f():
s.iat[3] = 5.
self.assertRaises(IndexError, f)
# ## frame ##
df_orig = DataFrame(
np.arange(6).reshape(3, 2), columns=['A', 'B'], dtype='int64')
# iloc/iat raise
df = df_orig.copy()
def f():
df.iloc[4, 2] = 5.
self.assertRaises(IndexError, f)
def f():
df.iat[4, 2] = 5.
self.assertRaises(IndexError, f)
# row setting where it exists
expected = DataFrame(dict({'A': [0, 4, 4], 'B': [1, 5, 5]}))
df = df_orig.copy()
df.iloc[1] = df.iloc[2]
tm.assert_frame_equal(df, expected)
expected = DataFrame(dict({'A': [0, 4, 4], 'B': [1, 5, 5]}))
df = df_orig.copy()
df.loc[1] = df.loc[2]
tm.assert_frame_equal(df, expected)
# like 2578, partial setting with dtype preservation
expected = DataFrame(dict({'A': [0, 2, 4, 4], 'B': [1, 3, 5, 5]}))
df = df_orig.copy()
df.loc[3] = df.loc[2]
tm.assert_frame_equal(df, expected)
# single dtype frame, overwrite
expected = DataFrame(dict({'A': [0, 2, 4], 'B': [0, 2, 4]}))
df = df_orig.copy()
df.ix[:, 'B'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# mixed dtype frame, overwrite
expected = DataFrame(dict({'A': [0, 2, 4], 'B': Series([0, 2, 4])}))
df = df_orig.copy()
df['B'] = df['B'].astype(np.float64)
df.ix[:, 'B'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# single dtype frame, partial setting
expected = df_orig.copy()
expected['C'] = df['A']
df = df_orig.copy()
df.ix[:, 'C'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# mixed frame, partial setting
expected = df_orig.copy()
expected['C'] = df['A']
df = df_orig.copy()
df.ix[:, 'C'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# ## panel ##
p_orig = Panel(np.arange(16).reshape(2, 4, 2),
items=['Item1', 'Item2'],
major_axis=pd.date_range('2001/1/12', periods=4),
minor_axis=['A', 'B'], dtype='float64')
# panel setting via item
p_orig = Panel(np.arange(16).reshape(2, 4, 2),
items=['Item1', 'Item2'],
major_axis=pd.date_range('2001/1/12', periods=4),
minor_axis=['A', 'B'], dtype='float64')
expected = p_orig.copy()
expected['Item3'] = expected['Item1']
p = p_orig.copy()
p.loc['Item3'] = p['Item1']
tm.assert_panel_equal(p, expected)
# panel with aligned series
expected = p_orig.copy()
expected = expected.transpose(2, 1, 0)
expected['C'] = DataFrame({'Item1': [30, 30, 30, 30],
'Item2': [32, 32, 32, 32]},
index=p_orig.major_axis)
expected = expected.transpose(2, 1, 0)
p = p_orig.copy()
p.loc[:, :, 'C'] = Series([30, 32], index=p_orig.items)
tm.assert_panel_equal(p, expected)
# GH 8473
dates = date_range('1/1/2000', periods=8)
df_orig = DataFrame(np.random.randn(8, 4), index=dates,
columns=['A', 'B', 'C', 'D'])
expected = pd.concat([df_orig, DataFrame(
{'A': 7}, index=[dates[-1] + 1])])
df = df_orig.copy()
df.loc[dates[-1] + 1, 'A'] = 7
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.at[dates[-1] + 1, 'A'] = 7
tm.assert_frame_equal(df, expected)
exp_other = DataFrame({0: 7}, index=[dates[-1] + 1])
expected = pd.concat([df_orig, exp_other], axis=1)
df = df_orig.copy()
df.loc[dates[-1] + 1, 0] = 7
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.at[dates[-1] + 1, 0] = 7
tm.assert_frame_equal(df, expected)
def test_partial_setting_mixed_dtype(self):
# in a mixed dtype environment, try to preserve dtypes
# by appending
df = DataFrame([[True, 1], [False, 2]], columns=["female", "fitness"])
s = df.loc[1].copy()
s.name = 2
expected = df.append(s)
df.loc[2] = df.loc[1]
tm.assert_frame_equal(df, expected)
# columns will align
df = DataFrame(columns=['A', 'B'])
df.loc[0] = Series(1, index=range(4))
tm.assert_frame_equal(df, DataFrame(columns=['A', 'B'], index=[0]))
# columns will align
df = DataFrame(columns=['A', 'B'])
df.loc[0] = Series(1, index=['B'])
exp = DataFrame([[np.nan, 1]], columns=['A', 'B'],
index=[0], dtype='float64')
tm.assert_frame_equal(df, exp)
# list-like must conform
df = DataFrame(columns=['A', 'B'])
def f():
df.loc[0] = [1, 2, 3]
self.assertRaises(ValueError, f)
# these are coerced to float unavoidably (as it's a list-like to begin with)
df = DataFrame(columns=['A', 'B'])
df.loc[3] = [6, 7]
exp = DataFrame([[6, 7]], index=[3], columns=['A', 'B'],
dtype='float64')
tm.assert_frame_equal(df, exp)
def test_series_partial_set(self):
# partial set with new index
# Regression from GH4825
ser = Series([0.1, 0.2], index=[1, 2])
# loc
expected = Series([np.nan, 0.2, np.nan], index=[3, 2, 3])
result = ser.loc[[3, 2, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([np.nan, 0.2, np.nan, np.nan], index=[3, 2, 3, 'x'])
result = ser.loc[[3, 2, 3, 'x']]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.2, 0.2, 0.1], index=[2, 2, 1])
result = ser.loc[[2, 2, 1]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.2, 0.2, np.nan, 0.1], index=[2, 2, 'x', 1])
result = ser.loc[[2, 2, 'x', 1]]
tm.assert_series_equal(result, expected, check_index_type=True)
# raises as nothing is in the index
self.assertRaises(KeyError, lambda: ser.loc[[3, 3, 3]])
expected = Series([0.2, 0.2, np.nan], index=[2, 2, 3])
result = ser.loc[[2, 2, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.3, np.nan, np.nan], index=[3, 4, 4])
result = Series([0.1, 0.2, 0.3], index=[1, 2, 3]).loc[[3, 4, 4]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([np.nan, 0.3, 0.3], index=[5, 3, 3])
result = Series([0.1, 0.2, 0.3, 0.4],
index=[1, 2, 3, 4]).loc[[5, 3, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([np.nan, 0.4, 0.4], index=[5, 4, 4])
result = Series([0.1, 0.2, 0.3, 0.4],
index=[1, 2, 3, 4]).loc[[5, 4, 4]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.4, np.nan, np.nan], index=[7, 2, 2])
result = Series([0.1, 0.2, 0.3, 0.4],
index=[4, 5, 6, 7]).loc[[7, 2, 2]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.4, np.nan, np.nan], index=[4, 5, 5])
result = Series([0.1, 0.2, 0.3, 0.4],
index=[1, 2, 3, 4]).loc[[4, 5, 5]]
tm.assert_series_equal(result, expected, check_index_type=True)
# iloc
expected = Series([0.2, 0.2, 0.1, 0.1], index=[2, 2, 1, 1])
result = ser.iloc[[1, 1, 0, 0]]
tm.assert_series_equal(result, expected, check_index_type=True)
def test_series_partial_set_with_name(self):
# GH 11497
idx = Index([1, 2], dtype='int64', name='idx')
ser = Series([0.1, 0.2], index=idx, name='s')
# loc
exp_idx = Index([3, 2, 3], dtype='int64', name='idx')
expected = Series([np.nan, 0.2, np.nan], index=exp_idx, name='s')
result = ser.loc[[3, 2, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([3, 2, 3, 'x'], dtype='object', name='idx')
expected = Series([np.nan, 0.2, np.nan, np.nan], index=exp_idx,
name='s')
result = ser.loc[[3, 2, 3, 'x']]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([2, 2, 1], dtype='int64', name='idx')
expected = Series([0.2, 0.2, 0.1], index=exp_idx, name='s')
result = ser.loc[[2, 2, 1]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([2, 2, 'x', 1], dtype='object', name='idx')
expected = Series([0.2, 0.2, np.nan, 0.1], index=exp_idx, name='s')
result = ser.loc[[2, 2, 'x', 1]]
tm.assert_series_equal(result, expected, check_index_type=True)
# raises as nothing is in the index
self.assertRaises(KeyError, lambda: ser.loc[[3, 3, 3]])
exp_idx = Index([2, 2, 3], dtype='int64', name='idx')
expected = Series([0.2, 0.2, np.nan], index=exp_idx, name='s')
result = ser.loc[[2, 2, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([3, 4, 4], dtype='int64', name='idx')
expected = Series([0.3, np.nan, np.nan], index=exp_idx, name='s')
idx = Index([1, 2, 3], dtype='int64', name='idx')
result = Series([0.1, 0.2, 0.3], index=idx, name='s').loc[[3, 4, 4]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([5, 3, 3], dtype='int64', name='idx')
expected = Series([np.nan, 0.3, 0.3], index=exp_idx, name='s')
idx = Index([1, 2, 3, 4], dtype='int64', name='idx')
result = Series([0.1, 0.2, 0.3, 0.4], index=idx,
name='s').loc[[5, 3, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([5, 4, 4], dtype='int64', name='idx')
expected = Series([np.nan, 0.4, 0.4], index=exp_idx, name='s')
idx = Index([1, 2, 3, 4], dtype='int64', name='idx')
result = Series([0.1, 0.2, 0.3, 0.4], index=idx,
name='s').loc[[5, 4, 4]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([7, 2, 2], dtype='int64', name='idx')
expected = Series([0.4, np.nan, np.nan], index=exp_idx, name='s')
idx = Index([4, 5, 6, 7], dtype='int64', name='idx')
result = Series([0.1, 0.2, 0.3, 0.4], index=idx,
name='s').loc[[7, 2, 2]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([4, 5, 5], dtype='int64', name='idx')
expected = | Series([0.4, np.nan, np.nan], index=exp_idx, name='s') | pandas.core.api.Series |
##
drive_path = 'c:/'
import numpy as np
import pandas as pd
import os
import sys
import matplotlib.pyplot as plt
from scipy.stats import ks_2samp
from scipy.stats import anderson_ksamp
from scipy.stats import kruskal
from scipy.stats import variation
from scipy import signal as sps
import seaborn as sns
import glob
import re
##
#This piece spits out all the peaks in one dataframe
def getpeaks(date):
'''Extract the response peaks from one imaging session.
date: session identifier, passed as a string
'''
# This piece spits out all the peaks from one session in one dataframe
peakdf = pd.DataFrame([])
os.chdir('C:\\Users\\Annie\\Documents\\Data\\Ca_Imaging\\HabituationFiles\\%s' % date)
for filename in glob.glob('*dt.txt'):
f = pd.read_csv(filename, nrows=175)
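# keep only the 'G PMT' signal columns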
df = f[[col for col in f.columns if 'G PMT' in col]]
peak = []
for col in df.columns:
a = df[col]
firsta = 1
firstb = 24
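# df[col][firsta:firstb] serves as the baseline window for the 3-SD noise threshold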
# Figures out if there is a min or max and sees if it passes threshold (3SD)
if np.absolute(min(a[26:80])) > np.absolute(max(a[26:80])) and np.absolute(min(a[26:80])) >= 3 * np.std(
df[col][firsta:firstb]):
b = min(a[26:80])
peak.append(b)
elif np.absolute(max(a[26:80])) > np.absolute(min(a[26:80])) and np.absolute(max(a[26:80])) >= 3 * np.std(
df[col][firsta:firstb]):
b = max(a[26:80])
peak.append(b)
else:
b = 0
peak.append(b)
peaks = pd.DataFrame(peak).T
peaks.columns = df.columns
peaks = pd.concat([pd.DataFrame({'Trial': [int(filename.split('dt')[0])]}), peaks], axis=1)
peakdf = peakdf.append(peaks, ignore_index=True)
peakdf.to_csv('%s_peaks.csv' % date, index=False)
trials = pd.read_csv('C:\\Users\\Annie\\Documents\\Data\\Ca_Imaging\\Analysis\\Odor_Panel\\Odor_Trials.csv')
filerow = trials.loc[trials['File'] == date]
odortrials = {}
for t in filerow.Odor.unique():
y = {t: [int(x) for x in filerow.loc[filerow['Odor'] == t][['T1', 'T2', 'T3', 'T4']].values.tolist()[0]]}
odortrials.update(y)
# Get average peak across all trials using peakdf dataframe
meandf = pd.DataFrame([])
for key in odortrials:
odor = odortrials[key]
mean = []
for col in peakdf.loc[peakdf['Trial'].isin(odor)][
[col for col in peakdf.loc[peakdf['Trial'].isin(odor)].columns if 'G PMT' in col]]:
mean.append(peakdf.loc[peakdf['Trial'].isin(odor)][col].mean())
mean = pd.DataFrame(mean).T
mean.columns = peakdf.loc[peakdf['Trial'].isin(odor)][
[col for col in peakdf.loc[peakdf['Trial'].isin(odor)].columns if 'G PMT' in col]].columns
meandf = meandf.append(mean)
meandf = meandf.reset_index(drop=True)
meandf.columns = [str(col) + '_' + date for col in meandf.columns]
meandf = pd.concat([pd.DataFrame({'Odor': odortrials.keys()}), meandf], axis=1)
meandf.to_csv('%s_mean.csv' % date, index=False)
# Get proportion of successful trials
successdf = pd.DataFrame([])
for key in odortrials:
odor = odortrials[key]
newdf = peakdf.loc[peakdf['Trial'].isin(odor)]
s = []
for col in peakdf.loc[peakdf['Trial'].isin(odor)][
[col for col in peakdf.loc[peakdf['Trial'].isin(odor)].columns if 'G PMT' in col]]:
s.append(np.divide((newdf.loc[:, col] != 0).sum(), float(len(newdf.loc[:, col]))))
s = pd.DataFrame(s).T
s.columns = peakdf.loc[peakdf['Trial'].isin(odor)][
[col for col in peakdf.loc[peakdf['Trial'].isin(odor)].columns if 'G PMT' in col]].columns
successdf = successdf.append(s)
successdf = successdf.reset_index(drop=True)
successdf.columns = [str(col) + '_' + date for col in successdf.columns]
successdf = pd.concat([pd.DataFrame({'Odor': odortrials.keys()}), successdf], axis=1)
successdf.to_csv('%s_success.csv' % date, index=False)
return 'Done'
##
def getintegral(date):
'''Compute integrals and integral means
date: string, session
'''
temp = pd.DataFrame([])
os.chdir('C:\\Users\\Annie\\Documents\\Data\\Ca_Imaging\\HabituationFiles\\%s' % date)
# Pull the trials that correspond to specific date/odors
trials = pd.read_csv('C:\\Users\\Annie\\Documents\\Data\\Ca_Imaging\\Analysis\\Odor_Panel\\Odor_Trials.csv')
filerow = trials.loc[trials['File'] == date]
odortrials = {}
for t in filerow.Odor.unique():
y = {t: [int(x) for x in filerow.loc[filerow['Odor'] == t][['T1', 'T2', 'T3', 'T4']].values.tolist()[0]]}
odortrials.update(y)
# Get the frame rate for a specified date
num = trials.File.unique().tolist().index('%s' % date)
fr = trials.loc[trials['File'] == trials.File.unique().tolist()[num]]['FrameRate'].iloc[0]
# Get the integral
intdf = pd.DataFrame([])
for filename in glob.glob('*dt.txt'):
f = pd.read_csv(filename, nrows=125)
df = f[[col for col in f.columns if 'G PMT' in col]]
winstart = np.int(4 * fr)
winend = np.int(12 * fr)
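# integrate the response between 4 s and 12 s, converted to frame indices via the frame rate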
integral = []
for col in df.columns:
a = df[col]
firsta = 1
firstb = 24
# Figures out if there is a min or max and sees if it passes threshold (3SD)
if np.absolute(min(a[26:80])) > np.absolute(max(a[26:80])) and np.absolute(min(a[26:80])) >= 3 * np.std(
df[col][firsta:firstb]):
b = sum(df[col][winstart:winend] * (1 / fr))
integral.append(b)
elif np.absolute(max(a[26:80])) > np.absolute(min(a[26:80])) and np.absolute(max(a[26:80])) >= 3 * np.std(
df[col][firsta:firstb]):
b = sum(df[col][winstart:winend] * (1 / fr))
integral.append(b)
else:
b = 0
integral.append(b)
integral = pd.DataFrame(integral).T
integral.columns = df.columns
integral = pd.concat([pd.DataFrame({'Trial': [int(filename.split('dt')[0])]}), integral], axis=1)
intdf = intdf.append(integral)
intdf.to_csv('%s_integral.csv' % date, index=False)
# Get average integral across all trials using integral dataframe
meanint = pd.DataFrame([])
for key in odortrials:
odor = odortrials[key]
mean = []
for col in intdf.loc[intdf['Trial'].isin(odor)][
[col for col in intdf.loc[intdf['Trial'].isin(odor)].columns if 'G PMT' in col]]:
mean.append(intdf.loc[intdf['Trial'].isin(odor)][col].mean())
mean = pd.DataFrame(mean).T
mean.columns = intdf.loc[intdf['Trial'].isin(odor)][
[col for col in intdf.loc[intdf['Trial'].isin(odor)].columns if 'G PMT' in col]].columns
meanint = meanint.append(mean)
meanint = meanint.reset_index(drop=True)
meanint.columns = [str(col) + '_' + date for col in meanint.columns]
meanint = pd.concat([pd.DataFrame({'Odor': odortrials.keys()}), meanint], axis=1)
meanint.to_csv('%s_meanint.csv' % date, index=False)
return 'Done'
##
def getbaseline(date):
temp = pd.DataFrame([])
os.chdir('C:\\Users\\Annie\\Documents\\Data\\Ca_Imaging\\HabituationFiles\\%s' % date)
# Pull the trials that correspond to specific date/odors
trials = pd.read_csv('C:\\Users\\Annie\\Documents\\Data\\Ca_Imaging\\Analysis\\Odor_Panel\\Odor_Trials.csv')
filerow = trials.loc[trials['File'] == date]
odortrials = {}
for t in filerow.Odor.unique():
y = {t: [int(x) for x in filerow.loc[filerow['Odor'] == t][['T1', 'T2', 'T3', 'T4']].values.tolist()[0]]}
odortrials.update(y)
# Get the frame rate for a specified date
num = trials.File.unique().tolist().index('%s' % date)
fr = trials.loc[trials['File'] == trials.File.unique().tolist()[num]]['FrameRate'].iloc[0]
# Get baseline
baseline = pd.DataFrame([])
for filename in glob.glob('*dt.txt'):
f = pd.read_csv(filename, nrows=125)
df = f[[col for col in f.columns if 'G PMT' in col]]
winstart = np.int(4 * fr)
winend = np.int(12 * fr)
base = []
for col in df.columns:
a = df[col]
firsta = 1
firstb = 24
b = (df[col][firsta:firstb]).mean()
base.append(b)
base = pd.DataFrame(base).T
base.columns = df.columns
base = pd.concat([pd.DataFrame({'Trial': [int(filename.split('dt')[0])]}), base], axis=1)
baseline = baseline.append(base)
baseline.to_csv('%s_baseline.csv' % date, index=False)
# mean baseline
meanbase = | pd.DataFrame([]) | pandas.DataFrame |
# --------------
# Importing header files
import numpy as np
import pandas as pd
from scipy.stats import mode
import warnings
warnings.filterwarnings('ignore')
#Reading file
bank_data = | pd.read_csv(path) | pandas.read_csv |
# import sys, os
# sys.path.append( os.path.join( os.path.dirname( __file__ ), '..' ) )
import numpy as np
import pandas as pd
from . import ConfusionMatrix
y_true = [1,1,3,1]
y_pred = [1,2,2,1]
labels = [1,2,3]
names = ['foo','bar','baz']
def test_create_cmat():
'''
Check that constructing method, called with ideal arguments,
does not throw a weird error
'''
cm = ConfusionMatrix.create( y_true, y_pred, labels, names )
def test_create_cmat_names_ok():
''' Check that names are correctly set, either explicityly or dynamically '''
# Create with explicit names
cm = ConfusionMatrix.create( y_true, y_pred, labels, names )
assert np.all( cm.cmat.columns == np.array(names) )
assert np.all( cm.cmat.index == np.array(names) )
# Create without explicit names, labels should be used
cm = ConfusionMatrix.create( y_true, y_pred, labels )
assert np.all( cm.cmat.columns == np.array(labels) )
assert np.all( cm.cmat.index == np.array(labels) )
def test_create_cmat_labels_ok():
''' Check that labels are correctly set or dynamically created '''
# Create with explicit labels
cm = ConfusionMatrix.create( y_true, y_pred, labels+[4] )
assert cm.cmat.shape == (4,4)
# Create with dynamically discovered labels
cm = ConfusionMatrix.create( y_true, y_pred )
assert cm.cmat.shape == (3,3)
assert np.all( cm.cmat.columns == np.array( labels ))
assert np.all( cm.cmat.index == np.array( labels ))
def test_classes():
''' Check that classes returns the provided labels '''
cm = ConfusionMatrix.create( y_true, y_pred, labels, names )
assert cm.num_classes == 3
assert np.all( cm.classes == np.array( names ))
def test_support():
''' Check that support correctly returns support for each class '''
cm = ConfusionMatrix.create( y_true, y_pred, labels, names )
expected = | pd.Series([3,0,1], index=names ) | pandas.Series |
from collections import defaultdict
import csv
import pandas.compat as compat
from pandas import DataFrame
from pandas_datareader.base import _BaseReader
_yahoo_codes = {'symbol': 's', 'last': 'l1', 'change_pct': 'p2', 'PE': 'r',
'time': 't1', 'short_ratio': 's7'}
class YahooQuotesReader(_BaseReader):
"""Get current yahoo quote"""
@property
def url(self):
return 'http://finance.yahoo.com/d/quotes.csv'
@property
def params(self):
"""Parameters to use in API calls"""
if isinstance(self.symbols, compat.string_types):
sym_list = self.symbols
else:
sym_list = '+'.join(self.symbols)
# For codes see: http://www.gummy-stuff.org/Yahoo-data.htm
#
# Construct the code request string.
request = ''.join( | compat.itervalues(_yahoo_codes) | pandas.compat.itervalues |
#!/usr/bin/env python3
from argparse import ArgumentParser
from collections import defaultdict
import os
import sys
import matplotlib
matplotlib.use('pdf')
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
matplotlib.rcParams['font.size'] = 12
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy
import seaborn as sns
import statsmodels.stats.contingency_tables
import tqdm
from janus.pipeline import pipeline_to_tree as pt
from janus.repair.local_rules import (
is_match_edit,
edit_to_str,
)
from janus import utils
def add_timestamp_percentile(df, percentiles, percentiles_labels):
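# bucket each (dataset, id) pair by the percentile of its timestamp within its dataset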
unique_ts = df.groupby(["dataset", "id"]).head(1)
percentile_map = {}
for d in unique_ts["dataset"].unique():
unique_ts_d = unique_ts[unique_ts["dataset"] == d]
unique_ts_d = unique_ts_d.sort_values("timestamp", ascending=True)
percents = pd.qcut(
unique_ts_d["timestamp"], percentiles, labels=percentiles_labels)
for i, p in zip(unique_ts_d["id"], percents):
percentile_map[(d, i)] = p
return [percentile_map[(d, i)] for d, i in zip(df["dataset"], df["id"])]
def prepare_df(df, compute_dist=False):
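# split results into original vs. repaired pipelines and merge them per dataset/strategy/id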
df_orig = df[df["type"] == "orig"]
df_orig = df_orig[~df_orig["failed"]]
# make sure we only consider dataset/id where we have the orig
# for all strategies
unique_strategies = df["strategy"].unique()
n_strategies = len(unique_strategies)
strategy_cts = df_orig.groupby(
["dataset", "id"])["strategy"].agg(lambda x: len(set(x)))
strategy_cts = strategy_cts.to_frame(name="strategy_cts").reset_index()
df_orig = pd.merge(df_orig, strategy_cts, how="left", on=["dataset", "id"])
df_orig = df_orig[df_orig["strategy_cts"] == n_strategies]
df_repaired = df[df["type"] == "repair"]
df_repaired = df_repaired[~df_repaired["failed"]]
df_repaired = df_repaired.sort_values("mean_test_score", ascending=False)
# there should only be one secore per dataset/id/strategy
assert df_repaired.groupby(["dataset", "strategy", "id"]).size().max() == 1
df_orig = df_orig[[
"dataset",
"strategy",
"id",
"test_scores",
"mean_test_score",
"graph",
"timestamp",
"repair_time",
]]
df_repaired = df_repaired[[
"dataset",
"strategy",
"id",
"mean_test_score",
"graph",
"test_scores",
"repair_time",
]]
df_combined = pd.merge(
df_orig,
df_repaired,
how="left",
on=["dataset", "strategy", "id"],
suffixes=("_orig", "_repaired"),
)
if compute_dist:
dist = [
None if pd.isnull(repaired) else pt.tree_edit_distance(
pt.to_pipeline(orig), pt.to_pipeline(repaired))
for orig, repaired in tqdm.tqdm(
list(
zip(df_combined["graph_orig"],
df_combined["graph_repaired"])))
]
else:
dist = np.nan
df_combined["distance"] = dist
# assign "row" to timestamp-based quartile
df_combined["ts_quartile"] = add_timestamp_percentile(
df_combined,
[0.0, 0.25, 0.5, 0.75, 1.0],
["0-0.25", "0.25-0.5", "0.5-0.75", "0.75-1.0"],
)
df_combined["ts_decile"] = add_timestamp_percentile(
df_combined, np.arange(0, 1.1, 0.1),
(lambda x: ["{:.1f}-{:.1f}".format(i, j) for i, j in zip(x, x[1:])])(
np.arange(0, 1.1, 0.1)))
df_combined["score_diff"] = df_combined[
"mean_test_score_repaired"] - df_combined["mean_test_score_orig"]
df_combined["had_effect"] = df_combined["score_diff"].abs() >= 0.01
df_combined["improved"] = (df_combined["score_diff"] >
0) & df_combined["had_effect"]
df_combined["improved_int"] = df_combined["improved"].astype(int)
df_combined["hurt"] = (df_combined["score_diff"] <
0) & df_combined["had_effect"]
df_combined["has_repair"] = ~pd.isnull(
df_combined["mean_test_score_repaired"])
df_combined["dummy"] = 1
return df_combined
def stat_by_strategy(df, col, groupcols=None, op="mean"):
if groupcols is None:
groupcols = ["dataset", "strategy"]
assert "strategy" in groupcols
stat_df = df.groupby(groupcols)[col].agg(op)
stat_df = stat_df.to_frame(name=col)
stat_df = stat_df.reset_index()
index_cols = list(groupcols)
index_cols.remove("strategy")
pv_stat_df = pd.pivot_table(
data=stat_df, index=index_cols, columns="strategy", values=col)
pv_stat_df = pv_stat_df.reset_index()
pv_stat_df.columns.name = None
return pv_stat_df
def summarize_df(df):
df = df.copy()
# now append version with "overall" (i.e. agg) ts_quartile
df_overall = df.copy()
df_overall["ts_quartile"] = "overall"
df = pd.concat((df, df_overall), axis=0).reset_index(drop=True)
groupcols = ["dataset", "ts_quartile", "strategy"]
print("Stat: Number of pipelines in experiments")
print(stat_by_strategy(df, "dummy", groupcols=groupcols, op="sum"), "\n")
print("Stat: Fraction w/ available 'repair'")
print(stat_by_strategy(df, "has_repair", groupcols=groupcols, op="mean"),
"\n")
print("Stat: Fraction improved")
print(stat_by_strategy(df, "improved", groupcols=groupcols, op="mean"),
"\n")
print("Stat: Total number improved")
print(stat_by_strategy(
df[df["ts_quartile"] == "overall"],
"improved_int",
groupcols=["dataset", "strategy"],
op="sum"), "\n")
print("Stat: Mean score diff")
print(stat_by_strategy(df, "score_diff", groupcols=groupcols, op="mean"),
"\n")
print("Stat: Mean score diff (if improvement)")
print(stat_by_strategy(
df[df["score_diff"] > 0], "score_diff", groupcols=groupcols,
op="mean"), "\n")
df = df.sort_values("score_diff", ascending=False)
best_df = df.groupby(["dataset", "ts_quartile", "id"]).head(1)
print(
"Stat: Number of dataset/pipeline where a strategy gets largest score improvement"
)
print(stat_by_strategy(best_df, "dummy", groupcols=groupcols, op="sum"))
print("Stat: Mean distance (if improvement)")
print(stat_by_strategy(
df[df["score_diff"] > 0], "distance", groupcols=groupcols, op="mean"),
"\n")
print("Stat: Mean repair time")
print(stat_by_strategy(
df,
"repair_time_repaired",
groupcols=["dataset", "strategy"],
op="mean"))
def get_palette(df):
strats = sorted(df["strategy"].unique())
colors = sns.color_palette("colorblind", len(strats))
return strats, {s: c for s, c in zip(strats, colors)}
def get_bootstrap(func, vals, num_iters, low, hi, random_state=None):
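# percentile bootstrap: resample vals with replacement and build a CI around the observed statistic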
num_obs = len(vals)
rng = np.random.RandomState(random_state)
boot_samples = rng.choice(vals, size=(num_iters, num_obs))
boot_ests = np.apply_along_axis(func, 1, boot_samples)
obs_val = func(vals)
boot_diffs = boot_ests - obs_val
low_diff = np.percentile(boot_diffs, low)
hi_diff = np.percentile(boot_diffs, hi)
# note the order of the differences
result = (obs_val - hi_diff, obs_val, obs_val - low_diff)
assert result[0] < result[1] < result[2]
return result
def get_rng(strategy):
return sum(ord(c) for c in strategy)
def table_fraction_outcome(df, column, get_bold, random_state=None):
mean_with_ci = lambda d, rs: get_bootstrap(
np.mean, d, 1000, 5.0, 95.0, random_state=rs
)
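# bootstrap a 90% CI (5th-95th percentile) of the mean outcome for each dataset/strategy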
df_res = df.groupby(["dataset", "strategy"]).apply(
lambda d: mean_with_ci(d[column].values, get_rng(d["strategy"].iloc[0]))
)
df_res = df_res.to_frame(name="change_with_ci").reset_index()
format_text = "{:.2f} ({:.2f}-{:.2f})"
df_res["val_text"] = df_res["change_with_ci"].map(
lambda t: format_text.format(t[1], t[0], t[2]))
df_res["mean_val"] = df_res["change_with_ci"].map(lambda t: t[1])
# bold the highest value per dataset
df_res["rank"] = df_res.groupby(["dataset"])["mean_val"].rank(
"dense", ascending=False)
df_res["with_bold"] = df_res.groupby(["dataset"])["mean_val"].apply(get_bold)
df_res["val_text"] = [
"\\textbf{{{}}}".format(txt) if bold else txt
for bold, txt in zip(df_res["with_bold"], df_res["val_text"])
]
df_res = df_res[["dataset", "strategy", "val_text"]]
df_pv = pd.pivot(
df_res, index="dataset", columns="strategy", values="val_text")
df_pv = df_pv.reset_index()
# escape ourselves
df_pv["dataset"] = df_pv["dataset"].map(lambda x: x.replace("_", "\\_"))
return df_pv
def table_fraction_repaired(df, random_state=None):
return table_fraction_outcome(
df, "improved", lambda x: x == max(x), random_state=random_state)
def table_fraction_hurt(df, random_state=None):
return table_fraction_outcome(
df, "hurt", lambda x: x == min(x), random_state=random_state)
def plot_fraction_repaired(df):
fig, ax = plt.subplots(1)
hue_order, palette = get_palette(df)
sns.barplot(
data=df,
x="improved",
y="dataset",
hue="strategy",
estimator=np.mean,
# linestyles=["None"] * len(df["strategy"].unique()),
dodge=True,
ci=95,
ax=ax,
orient="h",
palette=palette,
hue_order=hue_order,
)
ax.set_xlabel("Fraction of Pipelines Improved")
ax.set_ylabel("Dataset")
plt.legend(
loc="center right", bbox_to_anchor=(0.0, 1.05, 1., .102), ncol=2)
plt.tight_layout()
return ax
def plot_fraction_repaired_rank(df):
# rank systems
# by fraction repaired
# and then count the ranks
fig, ax = plt.subplots(1)
df_frac = df.groupby(["dataset", "strategy"])[["improved"]].mean()
df_frac = df_frac.reset_index()
df_frac["rank"] = df_frac.groupby(["dataset"])["improved"].rank(
"dense", ascending=False)
hue_order, palette = get_palette(df_frac)
sns.countplot(
data=df_frac,
x="rank",
hue="strategy",
dodge=True,
ax=ax,
palette=palette,
hue_order=hue_order,
)
ax.set_xlabel("Rank")
ax.set_ylabel("Datasets")
plt.legend(
loc="center right", bbox_to_anchor=(0.0, 1.05, 1., .102), ncol=2)
plt.tight_layout()
return ax
def plot_fraction_candidate(df):
fig, ax = plt.subplots(1)
df = df.copy()
sns.barplot(
data=df,
x="has_repair",
y="dataset",
hue="strategy",
estimator=np.mean,
# linestyles=["None"] * len(df["strategy"].unique()),
dodge=True,
ci=95,
ax=ax,
orient="h",
)
ax.set_xlabel("Fraction of Pipelines with Repair Candidate")
ax.set_ylabel("Dataset")
plt.legend(
loc="center right", bbox_to_anchor=(0.0, 1.05, 1., .102), ncol=2)
plt.tight_layout()
return ax
def fraction_repaired_over_time(df):
fig, ax = plt.subplots(1)
sns.pointplot(
data=df,
x="ts_decile",
y="improved",
hue="strategy",
estimator=np.mean,
dodge=True,
ci=95,
ax=ax,
)
plt.xticks(rotation=90)
ax.set_xlabel("Timestamp Decile")
ax.set_ylabel("Fraction of Pipelines Improved")
plt.legend(loc="best", title=None, ncol=2)
plt.tight_layout()
return ax
def plot_score_improvement(df):
all_strategies = df["strategy"].unique()
n_all_strategies = len(all_strategies)
df = df[df["improved"]]
check = df.groupby(["dataset",
"id"])["strategy"].agg(lambda x: len(set(x)))
check = check.to_frame(name="num_strategies").reset_index()
check["drop"] = check["num_strategies"] < n_all_strategies
df = | pd.merge(df, check, how="left", on=["dataset", "id"]) | pandas.merge |
import streamlit as st
import pandas as pd
from pyvis.network import Network
import networkx as nx
import matplotlib.pyplot as plt
import bz2
import pickle
import _pickle as cPickle
import pydot
import math
import numpy as num
def decompress_pickle(file):
data = bz2.BZ2File(file, 'rb')
data = cPickle.load(data)
return data
uploaded_files = st.sidebar.file_uploader("Choose files", accept_multiple_files=True)
# sidebar for navigating pages
page_nav = st.sidebar.selectbox("Select view:",('Document overviews','Focus concepts','Path views','Active Study view','Study phenomena','Study sets'))
@st.cache
def do_this_first(uploaded_files):
#st.write(st.__version__)
# Load any compressed pickle file
# for uploaded_file in uploaded_files:
# concepts = decompress_pickle(uploaded_file)
# st.write("filename:", uploaded_file.name)
filenames = [file.name for file in uploaded_files] # return this
import pandas as pd
Agg_Conceptdata = pd.DataFrame()
All_Conceptdata = pd.DataFrame()
Agg_np_to_sent = dict()
Agg_sent_to_npflat = dict()
Agg_sent_to_phen = dict()
Agg_phen_to_sent = dict()
Agg_att_to_sent = dict()
Agg_sent_to_att = dict()
Agg_ins_to_sent = dict()
Agg_sent_to_ins = dict()
Agg_set_to_sent = dict()
Agg_sent_to_set = dict()
Agg_np_to_forms = dict()
doc_to_np = dict()
np_to_doc = dict()
    Agg_df = pd.DataFrame()
Agg_np_to_roles = dict()
Agg_sent_to_clt = dict()
Agg_sents = dict()
#Agg_sents_df = pd.DataFrame()
#Agg_docs_df = pd.DataFrame()
All_df = pd.DataFrame()
for uploaded_file in uploaded_files:
concepts = decompress_pickle(uploaded_file)
filename = uploaded_file.name
#st.write("filename:", uploaded_file.name)
Conceptdata = concepts['Conceptdata']
sent_to_npflat = concepts['sent_to_npflat']
np_to_sent = concepts['np_to_sent']
np_to_forms = concepts['np_to_forms']
sent_to_phen = concepts['sent_to_phen']
phen_to_sent = concepts['phen_to_sent']
sent_to_att = concepts['sent_to_att']
        att_to_sent = concepts['att_to_sent']
ins_to_sent = concepts['ins_to_sent']
sent_to_ins = concepts['sent_to_ins']
set_to_sent = concepts['set_to_sent']
sent_to_set = concepts['sent_to_set']
np_to_roles = concepts['np_to_roles']
sent_to_clt = concepts['sent_to_clt']
sents = concepts['sents']
df = concepts['df']
Conceptdata['docname'] = filename
Agg_Conceptdata = Agg_Conceptdata.append(Conceptdata,ignore_index=True)
Agg_sent_to_clt[filename.replace(".pbz2","")] = sent_to_clt
Agg_np_to_sent[filename.replace(".pbz2","")] = np_to_sent
Agg_sents[filename.replace(".pbz2","")] = sents
Agg_sent_to_npflat[filename.replace(".pbz2","")] = sent_to_npflat
Agg_sent_to_set[filename.replace(".pbz2","")] = sent_to_set
Agg_sent_to_att[filename.replace(".pbz2","")] = sent_to_att
Agg_sent_to_phen[filename.replace(".pbz2","")] = sent_to_phen
Agg_sent_to_ins[filename.replace(".pbz2","")] = sent_to_ins
Agg_df = Agg_df.append(df,ignore_index=True)
doc_to_np[filename] = list(np_to_sent.keys()) # return this
for np in np_to_sent:
# if np in Agg_np_to_sent:
# Agg_np_to_sent[np] = Agg_np_to_sent[np] + [(filename,s) for s in np_to_sent[np]]
# else:
# Agg_np_to_sent[np] = [(filename,s) for s in np_to_sent[np]]
if np in np_to_doc:
np_to_doc[np] = np_to_doc[np] + [filename]
else:
np_to_doc[np] = [filename]
for np in np_to_forms:
if np in Agg_np_to_forms:
Agg_np_to_forms[np] = Agg_np_to_forms[np] + np_to_forms[np]
else:
Agg_np_to_forms[np] = np_to_forms[np]
for np in np_to_roles:
if np in Agg_np_to_roles:
Agg_np_to_roles[np] = Agg_np_to_roles[np] + np_to_roles[np]
else:
Agg_np_to_roles[np] = np_to_roles[np]
for np in phen_to_sent:
if np in Agg_phen_to_sent:
Agg_phen_to_sent[np] = Agg_phen_to_sent[np] + [(filename,s) for s in phen_to_sent[np]]
else:
Agg_phen_to_sent[np] = [(filename,s) for s in phen_to_sent[np]]
for np in att_to_sent:
if np in Agg_att_to_sent:
Agg_att_to_sent[np] = Agg_att_to_sent[np] + [(filename,s) for s in att_to_sent[np]]
else:
Agg_att_to_sent[np] = [(filename,s) for s in att_to_sent[np]]
for np in set_to_sent:
if np in Agg_set_to_sent:
Agg_set_to_sent[np] = Agg_set_to_sent[np] + [(filename,s) for s in set_to_sent[np]]
else:
Agg_set_to_sent[np] = [(filename,s) for s in set_to_sent[np]]
for np in ins_to_sent:
if np in Agg_ins_to_sent:
Agg_ins_to_sent[np] = Agg_ins_to_sent[np] + [(filename,s) for s in ins_to_sent[np]]
else:
Agg_ins_to_sent[np] = [(filename,s) for s in ins_to_sent[np]]
#st.write(Agg_Conceptdata.columns)
All_Conceptdata = | pd.DataFrame() | pandas.DataFrame |
from datetime import datetime, timedelta, timezone
import random
import unittest
import pandas as pd
import pytz
if __name__ == "__main__":
from pathlib import Path
import sys
sys.path.insert(0, str(Path(__file__).resolve().parents[2]))
from datatube.dtype import check_dtypes
class TestObj:
pass
unittest.TestCase.maxDiff = None
SIZE = 3
TEST_DATA = {
int: {
"integers":
[-1 * SIZE // 2 + i + 1 for i in range(SIZE)],
"whole floats":
[-1 * SIZE // 2 + i + 1.0 for i in range(SIZE)],
"real whole complex":
[complex(-1 * SIZE // 2 + i + 1, 0) for i in range(SIZE)],
},
float: {
"decimal floats":
[-1 * SIZE // 2 + i + 1 + random.random() for i in range(SIZE)],
"real decimal complex":
[complex(-1 * SIZE // 2 + i + 1 + random.random(), 0)
for i in range(SIZE)],
},
complex: {
"imaginary complex":
[complex(-1 * SIZE // 2 + i + 1 + random.random(),
-1 * SIZE // 2 + i + 1 + random.random())
for i in range(SIZE)],
},
str: {
"integer strings":
[str(-1 * SIZE // 2 + i + 1) for i in range(SIZE)],
"whole float strings":
[str(-1 * SIZE // 2 + i + 1.0) for i in range(SIZE)],
"decimal float strings":
[str(-1 * SIZE // 2 + i + 1 + random.random())
for i in range(SIZE)],
"real whole complex strings":
[str(complex(-1 * SIZE // 2 + i + 1, 0)) for i in range(SIZE)],
"real decimal complex strings":
[str(complex(-1 * SIZE // 2 + i + 1 + random.random(), 0))
for i in range(SIZE)],
"imaginary complex strings":
[str(complex(-1 * SIZE // 2 + i + 1 + random.random(),
-1 * SIZE // 2 + i + 1 + random.random()))
for i in range(SIZE)],
"character strings":
[chr(i % 26 + ord("a")) for i in range(SIZE)],
"boolean strings":
[str(bool((i + 1) % 2)) for i in range(SIZE)],
"aware datetime strings":
[str(datetime.fromtimestamp(i, tz=timezone.utc))
for i in range(SIZE)],
"aware ISO 8601 strings":
[datetime.fromtimestamp(i, tz=timezone.utc).isoformat()
for i in range(SIZE)],
"naive datetime strings":
[str(datetime.fromtimestamp(i)) for i in range(SIZE)],
"naive ISO 8601 strings":
[datetime.fromtimestamp(i).isoformat() for i in range(SIZE)],
"aware/naive datetime strings":
[str(datetime.fromtimestamp(i, tz=timezone.utc)) if i % 2
else str(datetime.fromtimestamp(i)) for i in range(SIZE)],
"aware/naive ISO 8601 strings":
[datetime.fromtimestamp(i, tz=timezone.utc).isoformat() if i % 2
else datetime.fromtimestamp(i).isoformat()
for i in range(SIZE)],
"mixed timezone datetime strings":
[str(
datetime.fromtimestamp(
i,
tz=pytz.timezone(
pytz.all_timezones[i % len(pytz.all_timezones)]
)
)
) for i in range(SIZE)],
"mixed timezone ISO 8601 strings":
[datetime.fromtimestamp(
i,
tz=pytz.timezone(
pytz.all_timezones[i % len(pytz.all_timezones)]
)
).isoformat() for i in range(SIZE)],
"timedelta strings":
[str(timedelta(seconds=i + 1)) for i in range(SIZE)],
"pd.Timedelta strings":
[str(pd.Timedelta(timedelta(seconds=i + 1))) for i in range(SIZE)]
},
bool: {
"booleans":
[bool((i + 1) % 2) for i in range(SIZE)]
},
datetime: {
"aware datetimes":
[datetime.fromtimestamp(i, tz=timezone.utc) for i in range(SIZE)],
"naive datetimes":
[datetime.fromtimestamp(i) for i in range(SIZE)],
"aware/naive datetimes":
[datetime.fromtimestamp(i, tz=timezone.utc) if i % 2
else datetime.fromtimestamp(i) for i in range(SIZE)],
"mixed timezone datetimes":
[datetime.fromtimestamp(
i,
tz = pytz.timezone(
pytz.all_timezones[i % len(pytz.all_timezones)]
)
) for i in range(SIZE)]
},
timedelta: {
"timedeltas":
[timedelta(seconds=i + 1) for i in range(SIZE)]
},
object: {
"Nones":
[None for _ in range(SIZE)],
"custom objects":
[TestObj() for _ in range(SIZE)]
}
}
ALL_DATA = {col_name: data for v in TEST_DATA.values()
for col_name, data in v.items()}
class CheckDtypeTests(unittest.TestCase):
def test_check_integers_series_no_na(self):
failed = []
for col_name, data in ALL_DATA.items():
series = pd.Series(data)
result = check_dtypes(series, int)
expected = col_name in TEST_DATA[int]
try:
self.assertEqual(result, expected)
except AssertionError:
context = f"check_dtypes({data[:3]}..., int) != {expected}"
failed.append(context)
if len(failed) > 0:
joined = "\n\t".join(failed)
raise AssertionError(f"{len(failed)} failed checks:\n\t{joined}")
def test_check_integers_series_with_na(self):
failed = []
for col_name, data in ALL_DATA.items():
series = pd.Series(data + [None])
result = check_dtypes(series, int)
expected = col_name in TEST_DATA[int]
try:
self.assertEqual(result, expected)
except AssertionError:
context = f"check_dtypes({data[:3]}..., int) != {expected}"
failed.append(context)
if len(failed) > 0:
joined = "\n\t".join(failed)
raise AssertionError(f"{len(failed)} failed checks:\n\t{joined}")
def test_check_integers_df_no_na(self):
df = pd.DataFrame(ALL_DATA)
failed = []
for col_name in df.columns:
result = check_dtypes(df, {col_name: int})
expected = col_name in TEST_DATA[int]
try:
self.assertEqual(result, expected)
except AssertionError:
context = (f"check_dtypes(df, {{{repr(col_name)}: int}}) != "
f"{expected}")
failed.append(context)
if len(failed) > 0:
joined = "\n\t".join(failed)
raise AssertionError(f"{len(failed)} failed checks:\n\t{joined}")
def test_check_integers_df_with_na(self):
with_na = {k: v + [None] for k, v in ALL_DATA.items()}
df = pd.DataFrame(with_na)
failed = []
for col_name in df.columns:
result = check_dtypes(df, {col_name: int})
expected = col_name in TEST_DATA[int]
try:
self.assertEqual(result, expected)
except AssertionError:
context = (f"check_dtypes(df, {{{repr(col_name)}: int}}) != "
f"{expected}")
failed.append(context)
if len(failed) > 0:
joined = "\n\t".join(failed)
raise AssertionError(f"{len(failed)} failed checks:\n\t{joined}")
def test_check_floats_series_no_na(self):
failed = []
for col_name, data in ALL_DATA.items():
series = pd.Series(data)
result = check_dtypes(series, float)
expected = col_name in TEST_DATA[float]
try:
self.assertEqual(result, expected)
except AssertionError:
context = f"check_dtypes({data[:3]}..., float) != {expected}"
failed.append(context)
if len(failed) > 0:
joined = "\n\t".join(failed)
raise AssertionError(f"{len(failed)} failed checks:\n\t{joined}")
def test_check_floats_series_with_na(self):
failed = []
for col_name, data in ALL_DATA.items():
series = pd.Series(data + [None])
result = check_dtypes(series, float)
expected = col_name in TEST_DATA[float]
try:
self.assertEqual(result, expected)
except AssertionError:
context = f"check_dtypes({data[:3]}..., float) != {expected}"
failed.append(context)
if len(failed) > 0:
joined = "\n\t".join(failed)
raise AssertionError(f"{len(failed)} failed checks:\n\t{joined}")
def test_check_floats_df_no_na(self):
df = pd.DataFrame(ALL_DATA)
failed = []
for col_name in df.columns:
result = check_dtypes(df, {col_name: float})
expected = col_name in TEST_DATA[float]
try:
self.assertEqual(result, expected)
except AssertionError:
context = (f"check_dtypes(df, {{{repr(col_name)}: float}}) != "
f"{expected}")
failed.append(context)
if len(failed) > 0:
joined = "\n\t".join(failed)
raise AssertionError(f"{len(failed)} failed checks:\n\t{joined}")
def test_check_floats_df_with_na(self):
with_na = {k: v + [None] for k, v in ALL_DATA.items()}
df = pd.DataFrame(with_na)
failed = []
for col_name in df.columns:
result = check_dtypes(df, {col_name: float})
expected = col_name in TEST_DATA[float]
try:
self.assertEqual(result, expected)
except AssertionError:
context = (f"check_dtypes(df, {{{repr(col_name)}: float}}) != "
f"{expected}")
failed.append(context)
if len(failed) > 0:
joined = "\n\t".join(failed)
raise AssertionError(f"{len(failed)} failed checks:\n\t{joined}")
def test_check_complex_series_no_na(self):
failed = []
for col_name, data in ALL_DATA.items():
series = pd.Series(data)
result = check_dtypes(series, complex)
expected = col_name in TEST_DATA[complex]
try:
self.assertEqual(result, expected)
except AssertionError:
context = f"check_dtypes({data[:3]}..., complex) != {expected}"
failed.append(context)
if len(failed) > 0:
joined = "\n\t".join(failed)
raise AssertionError(f"{len(failed)} failed checks:\n\t{joined}")
def test_check_complex_series_with_na(self):
failed = []
for col_name, data in ALL_DATA.items():
series = pd.Series(data + [None])
result = check_dtypes(series, complex)
expected = col_name in TEST_DATA[complex]
try:
self.assertEqual(result, expected)
except AssertionError:
context = f"check_dtypes({data[:3]}..., complex) != {expected}"
failed.append(context)
if len(failed) > 0:
joined = "\n\t".join(failed)
raise AssertionError(f"{len(failed)} failed checks:\n\t{joined}")
def test_check_complex_df_no_na(self):
df = pd.DataFrame(ALL_DATA)
failed = []
for col_name in df.columns:
result = check_dtypes(df, {col_name: complex})
expected = col_name in TEST_DATA[complex]
try:
self.assertEqual(result, expected)
except AssertionError:
context = (f"check_dtypes(df, {{{repr(col_name)}: complex}}) "
f"!= {expected}")
failed.append(context)
if len(failed) > 0:
joined = "\n\t".join(failed)
raise AssertionError(f"{len(failed)} failed checks:\n\t{joined}")
def test_check_complex_df_with_na(self):
with_na = {k: v + [None] for k, v in ALL_DATA.items()}
df = pd.DataFrame(with_na)
failed = []
for col_name in df.columns:
result = check_dtypes(df, {col_name: complex})
expected = col_name in TEST_DATA[complex]
try:
self.assertEqual(result, expected)
except AssertionError:
context = (f"check_dtypes(df, {{{repr(col_name)}: complex}}) "
f"!= {expected}")
failed.append(context)
if len(failed) > 0:
joined = "\n\t".join(failed)
raise AssertionError(f"{len(failed)} failed checks:\n\t{joined}")
def test_check_strings_series_no_na(self):
failed = []
for col_name, data in ALL_DATA.items():
series = pd.Series(data)
result = check_dtypes(series, str)
expected = col_name in TEST_DATA[str]
try:
self.assertEqual(result, expected)
except AssertionError:
context = f"check_dtypes({data[:3]}..., str) != {expected}"
failed.append(context)
if len(failed) > 0:
joined = "\n\t".join(failed)
raise AssertionError(f"{len(failed)} failed checks:\n\t{joined}")
def test_check_strings_series_with_na(self):
failed = []
for col_name, data in ALL_DATA.items():
series = pd.Series(data + [None])
result = check_dtypes(series, str)
expected = col_name in TEST_DATA[str]
try:
self.assertEqual(result, expected)
except AssertionError:
context = f"check_dtypes({data[:3]}..., str) != {expected}"
failed.append(context)
if len(failed) > 0:
joined = "\n\t".join(failed)
raise AssertionError(f"{len(failed)} failed checks:\n\t{joined}")
def test_check_strings_df_no_na(self):
df = | pd.DataFrame(ALL_DATA) | pandas.DataFrame |
import streamlit as st
import pandas as pd
import altair as alt
import numpy as np
from streamlit_option_menu import option_menu
import xgboost as xgb
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import cross_val_score, KFold
from sklearn.svm import SVR
from sklearn.decomposition import PCA
from xgboost import XGBRegressor
from vega_datasets import data
st.set_page_config(layout="wide")
st.markdown(""" <style> .font {
font-size:50px ; font-family: 'Cooper Black'; color: #FF9633;}
</style> """, unsafe_allow_html=True)
@st.cache(allow_output_mutation=True)
def load_data():
data_path = "ds_data.csv"
df = pd.read_csv(data_path)
return df
def get_state_slices(df, state):
    label = pd.Series([True]*len(df), index=df.index)
    if state:
        label &= df['Job Location'].apply(lambda x: True if x == state else False)
    return label
df = load_data()
def Xgb_Regression():
model = xgb.XGBRegressor(max_depth=7, eta=0.1,n_estimators=1000, learning_rate=0.1)
return model
@st.cache(allow_output_mutation=True)
def train_model(x,y):
model = Xgb_Regression()
xtrain, xtest, ytrain, ytest=train_test_split(x, y, test_size=0.1)
model.fit(xtrain, ytrain)
print("***")
print(xtest)
return model, xtrain, xtest, ytrain, ytest
def feature_importance(model,x):
df = pd.DataFrame(model.feature_importances_,columns=['importance'])
df['features'] = x.columns
df.reindex(x.columns, axis=1)
feature_chart = alt.Chart(df).mark_bar(color="#C0EDA6").encode(
y=alt.Y('features',sort="-x"),
x=alt.X('importance'),
color="importance",
tooltip = ['importance']
).interactive()
st.altair_chart(feature_chart, use_container_width=True)
def model_accuracy(model, xtrain, xtest, ytrain, ytest):
#evaluate
scores = cross_val_score(model, xtrain, ytrain,cv=5)
print("Mean cross-validation score: %.2f" % scores.mean())
ypred = model.predict(xtest)
mse = mean_squared_error(ytest, ypred)
print("MSE: %.2f" % mse)
y_pred_test = model.predict(xtest)
source = pd.DataFrame({
'ytest':ytest,
'y_pred_test':y_pred_test,
})
predVSactual=alt.Chart(source).mark_circle(size=60).encode(
x='ytest',
y='y_pred_test',
).interactive()
line = alt.Chart(source).mark_line(
color='red',
size=3
).encode(
x="ytest",
y="ytest",
)
st.altair_chart(predVSactual+line,use_container_width=True)
st.sidebar.image("images/logo.png", use_column_width=True)
with st.sidebar:
selected = option_menu(
menu_title = "",
options = ["Data Exploration", "Salary Prediction"],
default_index=0,
icons=["boxes","search","clipboard-data"],
)
if selected =="Data Exploration":
st.title(f"{selected}")
if st.checkbox("Show raw data"):
st.write(df[:25])
st.header("🗺️ The salary map of Data Scientist in the US")
counties = alt.topo_feature(data.us_10m.url, 'counties')
source = data.unemployment.url
us_states = ['AK', 'AL', 'AR', 'AS', 'AZ', 'CA', 'CO', 'CQ', 'CT', 'DC', 'DE', 'FL', 'GA', 'GU',
'HI', 'IA', 'ID', 'IL', 'IN', 'KS', 'KY', 'LA', 'MA', 'MD', 'ME',
'MI', 'MN', 'MO', 'MS', 'MT', 'NC', 'ND', 'NE', 'NH', 'NJ', 'NM',
'NV', 'NY', 'OH', 'OK', 'OR', 'PA', 'PR', 'RI', 'SC', 'SD', 'TN', 'TX',
'UT', 'VA', 'VI', 'VT', 'WA', 'WI', 'WV', 'WY']
dic = {}
for i, s in enumerate(us_states):
dic[s] = i + 1
df_map = df.groupby('Job Location').agg({'avgSalary': np.mean}).reset_index()
df_map['id'] = df_map['Job Location'].apply(lambda x : dic[x])
states = alt.topo_feature(data.us_10m.url, feature='states')
map_salary = alt.Chart(states).mark_geoshape().encode(
color='avgSalary:Q',
tooltip=[
alt.Tooltip("Job Location:N", title="State"),
alt.Tooltip("avgSalary" + ":O", format='.1f', title="Average Salary"),
],
).transform_lookup(
lookup='id',
from_=alt.LookupData(df_map, 'id', ['Job Location', 'avgSalary'])
).project('albersUsa')
background = alt.Chart(states).mark_geoshape(
fill='lightgray',
stroke='white'
).properties(
title='map of average data scientist salary/K',
width=700,
height=400
).project('albersUsa')
st.write(background + map_salary)
st.header("❓ Select the states you want to know about:")
values = df['Job Location'].unique().tolist()
default_ix = values.index('CA')
state_option = st.selectbox(
'state',
values,
index=default_ix
)
state_slices = get_state_slices(df,state_option)
nationwide = pd.DataFrame()
nationwide['avgSalary'] = df['avgSalary']
nationwide['name'] = 'nationwide salary'
state = pd.DataFrame()
state['avgSalary'] = df[state_slices]['avgSalary']
state['name'] = state_option + ' salary'
category = ['0-50K', '50K-100K', '100K-150K', '150K-200K', '200K-250K', '250K-300K']
nationwide['binned']=pd.cut(x=nationwide['avgSalary'], bins=[0,50,100,150,200, 250, 300], labels=category)
nation_count = pd.value_counts(nationwide['binned']).reset_index()
nation_count['name'] = 'US salary'
state['binned']= | pd.cut(x=state['avgSalary'], bins=[0,50,100,150,200, 250, 300], labels=category) | pandas.cut |
"""
This module contains all US-specific data loading and data cleaning routines.
"""
import datetime
import requests
import pandas as pd
import numpy as np
from .. import data
idx = pd.IndexSlice
def get_raw_covidtracking_data(run_date: pd.Timestamp):
""" Gets the current daily CSV from COVIDTracking """
if run_date.date() > datetime.date.today():
raise ValueError("Run date is in the future. Nice try.")
if run_date.date() < datetime.date.today():
# TODO: implement downloading of historic data
raise NotImplementedError(
"Downloading with a run_date is not yet supported. "
f"Today: {datetime.date.today()}, run_date: {run_date}"
)
url = "https://covidtracking.com/api/v1/states/daily.csv"
data = pd.read_csv(url).rename(columns={
"state": "region",
})
data["date"] = pd.to_datetime(data["date"], format="%Y%m%d")
data = data.set_index(["region", "date"]).sort_index()
# Too little data or unreliable reporting in the data source.
df_raw = data.drop(["MP", "GU", "AS", "PR", "VI"])
# the data in these columns is crap. But it will be corrected by the process_covidtracking_data function
# here we just add the columns so the original data is kept
for region in df_raw.reset_index().region.unique():
df_raw.loc[idx[region, :], "new_cases"] = df_raw.xs(region).positive.diff().values
df_raw.loc[idx[region, :], "new_tests"] = df_raw.xs(region).total.diff().values
# calculate the sum over all states
df_all = df_raw.sum(level='date')
df_all.insert(0, column='region', value='all')
df_all = df_all.reset_index().set_index(['region', 'date'])
df_merged = pd.concat([df_raw, df_all]).sort_index()
return df_merged
def apply_corrections(data: pd.DataFrame) -> pd.DataFrame:
# On Jun 5 Covidtracking started counting probable cases too
# which increases the amount by 5014.
# https://covidtracking.com/screenshots/MI/MI-20200605-184320.png
data.loc[idx["MI", pd.Timestamp("2020-06-05") :], "total"] -= 5014
    # From CT: On June 19th, LDH removed 1666 duplicate and non-resident cases
    # after implementing a new de-duplication process.
data.loc[idx["LA", pd.Timestamp("2020-06-19") :], ["positive", "total"]] += 1666
# calculate the daily counts
for region in data.reset_index().region.unique():
data.loc[idx[region, :], "new_cases"] = data.xs(region).positive.diff().values
data.loc[idx[region, :], "new_tests"] = data.xs(region).total.diff().values
data["new_cases"][data["new_cases"] < 0] = np.nan
data["new_tests"][data["new_tests"] < 0] = np.nan
# Michigan missed 6/18 totals and lumped them into 6/19 so we've
# divided the totals in two and equally distributed to both days.
data.loc[idx["MI", pd.Timestamp("2020-06-18")], "new_tests"] = 14871
data.loc[idx["MI", pd.Timestamp("2020-06-19")], "new_tests"] = 14871
# Note that when we set new_cases/new_tests to NaN, the model ignores that date. See
# the likelihood function in GenerativeModel.build
# Huge outlier in NJ causing sampling issues.
data.loc[idx["NJ", pd.Timestamp("2020-05-11")], ["new_cases", "new_tests"]] = np.nan
# Same tests and positives, nulling out
data.loc[idx["NJ", pd.Timestamp("2020-07-25")], ["new_cases", "new_tests"]] = np.nan
# Huge outlier in CA causing sampling issues.
data.loc[idx["CA", pd.Timestamp("2020-04-22")], ["new_cases", "new_tests"]] = np.nan
# Huge outlier in CA causing sampling issues.
# TODO: generally should handle when # tests == # positives and that
# is not an indication of positive rate.
data.loc[idx["SC", pd.Timestamp("2020-06-26")], ["new_cases", "new_tests"]] = np.nan
# Two days of no new data then lumped sum on third day with lack of new total tests
data.loc[idx["OR", pd.Timestamp("2020-06-26") : pd.Timestamp("2020-06-28")], 'new_cases'] = 174
data.loc[idx["OR", pd.Timestamp("2020-06-26") : pd.Timestamp("2020-06-28")], 'new_tests'] = 3296
#https://twitter.com/OHdeptofhealth/status/1278768987292209154
data.loc[idx["OH", pd.Timestamp("2020-07-01")], ["new_cases", "new_tests"]] = np.nan
data.loc[idx["OH", pd.Timestamp("2020-07-09")], ["new_cases", "new_tests"]] = np.nan
# Nevada didn't report total tests this day
data.loc[idx["NV", pd.Timestamp("2020-07-02")], ["new_cases", "new_tests"]] = np.nan
# A bunch of incorrect values for WA data so nulling them out.
data.loc[idx["WA", pd.Timestamp("2020-06-05") : pd.Timestamp("2020-06-07")], ["new_cases", "new_tests"]] = np.nan
data.loc[idx["WA", pd.Timestamp("2020-06-20") : pd.Timestamp("2020-06-21")], ["new_cases", "new_tests"]] = np.nan
# AL reported tests == positives
data.loc[idx["AL", pd.Timestamp("2020-07-09")], ["new_cases", "new_tests"]] = np.nan
# Low reported tests
data.loc[idx["AR", pd.Timestamp("2020-07-10")], ["new_cases", "new_tests"]] = np.nan
# Positives == tests
data.loc[idx["MS", pd.Timestamp("2020-07-12")], ["new_cases", "new_tests"]] = np.nan
# Positive == Tests; lumpy reporting for CT
data.loc[idx["CT", pd.Timestamp("2020-07-17")], ["new_cases", "new_tests"]] = np.nan
data.loc[idx["CT", pd.Timestamp("2020-07-21")], ["new_cases", "new_tests"]] = np.nan
data.loc[idx["DC", pd.Timestamp("2020-08-04")], ["new_cases", "new_tests"]] = np.nan
# Outlier dates in PA
data.loc[
idx[
"PA",
[
| pd.Timestamp("2020-06-03") | pandas.Timestamp |
import numpy as np
import pandas as pd
import pytest
from blocktorch.problem_types import (
ProblemTypes,
detect_problem_type,
handle_problem_types,
is_binary,
is_classification,
is_multiclass,
is_regression,
is_time_series,
)
@pytest.fixture
def correct_problem_types():
# Unit tests expect this order
correct_problem_types = [
ProblemTypes.REGRESSION,
ProblemTypes.MULTICLASS,
ProblemTypes.BINARY,
ProblemTypes.TIME_SERIES_REGRESSION,
ProblemTypes.TIME_SERIES_BINARY,
ProblemTypes.TIME_SERIES_MULTICLASS,
]
yield correct_problem_types
def test_handle_string(correct_problem_types):
problem_types = [
"regression",
ProblemTypes.MULTICLASS,
"binary",
ProblemTypes.TIME_SERIES_REGRESSION,
"time series binary",
"time series multiclass",
]
for problem_type in zip(problem_types, correct_problem_types):
assert handle_problem_types(problem_type[0]) == problem_type[1]
problem_type = "fake"
error_msg = "Problem type 'fake' does not exist"
with pytest.raises(KeyError, match=error_msg):
handle_problem_types(problem_type) == ProblemTypes.REGRESSION
def test_handle_problem_types(correct_problem_types):
for problem_type in correct_problem_types:
assert handle_problem_types(problem_type) == problem_type
def test_handle_incorrect_type():
error_msg = "`handle_problem_types` was not passed a str or ProblemTypes object"
with pytest.raises(ValueError, match=error_msg):
handle_problem_types(5)
def test_detect_problem_type_error():
y_empty = pd.Series([])
y_one_value = pd.Series([1, 1, 1, 1, 1, 1])
y_nan = pd.Series([np.nan, np.nan, 0])
y_all_nan = pd.Series([np.nan, np.nan])
with pytest.raises(ValueError, match="Less than 2"):
detect_problem_type(y_empty)
with pytest.raises(ValueError, match="Less than 2"):
detect_problem_type(y_one_value)
with pytest.raises(ValueError, match="Less than 2"):
detect_problem_type(y_nan)
with pytest.raises(ValueError, match="Less than 2"):
detect_problem_type(y_all_nan)
def test_detect_problem_type_binary():
y_binary = | pd.Series([1, 0, 1, 0, 0, 1]) | pandas.Series |
# To add a new cell, type '#%%'
# To add a new markdown cell, type '#%% [markdown]'
#%%
from IPython import get_ipython
#%%
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import io
import base64
from matplotlib import animation
from matplotlib import cm
from matplotlib.pyplot import *
from sklearn.cluster import KMeans
from sklearn.neighbors import KNeighborsClassifier
from dateutil import parser
from IPython.display import HTML
from subprocess import check_output
import seaborn as sns
#%%
get_ipython().run_line_magic('matplotlib', 'inline')
#%%
plt.rcParams['patch.force_edgecolor'] = 'True'
plt.rcParams['figure.figsize'] = (16,10)
plt.rcParams['axes.unicode_minus'] = False
#%%
df_train = pd.read_csv('train.csv')
df_test = pd.read_csv('test.csv')
#%%
df_train.head()
#%%
df_train.describe()
#%%
df_train['log_trip duration'] = np.log(df_train['trip_duration'].values + 1)
plt.hist(df_train['log_trip duration'].values, bins=100)
plt.xlabel("log(trip duration)")
plt.ylabel('number of training records')
plt.show()
#%%
# Ignore this
N = 20000
city_long_border = (-75, -75)
city_lat_border = (40,40)
fig,ax = plt.subplots(ncols=1)
ax.scatter(df_train['pickup_longitude'].values[:N],
df_train['pickup_latitude'].values[:N],
color='blue',s=1,label='train',alpha=0.1)
plt.show()
#%%
#%%
type(df_train['pickup_datetime'])
#%%
df_train['pickup_datetime'] = pd.to_datetime(df_train['pickup_datetime'])
#%%
df_train['dropoff_datetime'] = pd.to_datetime(df_train['dropoff_datetime'])
#%%
df_train['pickup_hr'] = df_train['pickup_datetime'].apply(lambda time:time.hour)
df_train['pickup_min'] = df_train['pickup_datetime'].apply(lambda time:time.minute)
df_train['pickup_sec'] = df_train['pickup_datetime'].apply(lambda time:time.second)
#%%
df_train['dropoff_hr'] = df_train['dropoff_datetime'].apply(lambda time:time.hour)
df_train['dropoff_min'] = df_train['dropoff_datetime'].apply(lambda time:time.minute)
df_train['dropoff_sec'] = df_train['dropoff_datetime'].apply(lambda time:time.second)
#%%
df_train['pickup_day'] = df_train['pickup_datetime'].apply(lambda time:time.dayofweek)
df_train['pickup_month'] = df_train['pickup_datetime'].apply(lambda time:time.month)
df_train['pickup_year'] = df_train['pickup_datetime'].apply(lambda time:time.year)
df_train['dropoff_day'] = df_train['dropoff_datetime'].apply(lambda time:time.dayofweek)
df_train['dropoff_month'] = df_train['dropoff_datetime'].apply(lambda time:time.month)
df_train['dropoff_year'] = df_train['dropoff_datetime'].apply(lambda time:time.year)
#%%
dmap = {0:'Mon',1:'Tue',2:'Wed',3:'Thur',4:'Fri',5:'Sat',6:'Sun'}
df_train['Pickup Day of Week'] = df_train['pickup_day'].map(dmap)
df_train['Dropoff Day of Week'] = df_train['dropoff_day'].map(dmap)
#%%
df_train.head()
#%%
df_train[df_train['Pickup Day of Week']!= df_train['Dropoff Day of Week']].head()
#%%
df_train[df_train['Pickup Day of Week']!= df_train['Dropoff Day of Week']].describe()
#%%
len(df_train[df_train['Pickup Day of Week']!= df_train['Dropoff Day of Week']])
#%%
sns.countplot('Pickup Day of Week',data=df_train, hue='pickup_month')
#%%
sns.countplot('Pickup Day of Week',data=df_train)
#%%
df_train['Date'] = df_train['pickup_datetime'].apply(lambda t: t.date())
#%%
df_train.groupby('Date').count()['id'].plot()
#%%
sns.countplot('Pickup Day of Week',data=df_train,hue='store_and_fwd_flag',palette='coolwarm')
#%%
sns.heatmap(df_train.corr(),cmap='coolwarm')
#%%
sns.scatterplot(x='pickup_latitude',y='dropoff_latitude',data=df_train)
#%%
sns.scatterplot(x='pickup_longitude',y='dropoff_longitude',data=df_train)
#%%
sns.jointplot(x='pickup_hr',y='dropoff_hr',data=df_train[:10000],kind = "reg")
#%%
# IDK if it is good.....
sns.jointplot(x='pickup_hr',y='dropoff_hr',data=df_train[:10000],kind = "hex")
#%%
xlim = [-74.03, -73.77]
ylim = [40.63, 40.85]
df_train = df_train[(df_train.pickup_longitude> xlim[0]) & (df_train.pickup_longitude < xlim[1])]
df_train = df_train[(df_train.dropoff_longitude> xlim[0]) & (df_train.dropoff_longitude < xlim[1])]
df_train = df_train[(df_train.pickup_latitude> ylim[0]) & (df_train.pickup_latitude < ylim[1])]
df_train = df_train[(df_train.dropoff_latitude> ylim[0]) & (df_train.dropoff_latitude < ylim[1])]
longitude = list(df_train.pickup_longitude) + list(df_train.dropoff_longitude)
latitude = list(df_train.pickup_latitude) + list(df_train.dropoff_latitude)
#%%
plt.figure(figsize = (10,10))
plt.plot(longitude,latitude,'.', alpha = 0.4, markersize = 0.05,color="black")
plt.show()
#%%
km_df = pd.DataFrame()
km_df['longitude'] = longitude
km_df['latitude'] = latitude
#%% [markdown]
# #### Now we will cluster the NYC map based on the cabs pick up and drop off points...
#%%
kmeans = KMeans(n_clusters=15, random_state=2, n_init = 10).fit(km_df)
km_df['label'] = kmeans.labels_
km_df = km_df.sample(200000)
plt.figure(figsize = (10,10))
for label in km_df.label.unique():
plt.plot(km_df.longitude[km_df.label == label],km_df.latitude[km_df.label == label],'.', alpha = 0.3, markersize = 0.3)
plt.title('Clusters of New York Based on Cab pickup and dropoff points')
plt.show()
#%% [markdown]
# As we can see, the clustering results in a partition which is somewhat similar to the way NY is divided into different neighborhoods. We can see Upper East and West side of Central park in gray and pink respectively. West midtown in blue, Chelsea and West Village in brown, downtown area in blue, East Village and SoHo in purple.
#
# The airports JFK and La LaGuardia have there own cluster, and so do Queens and Harlem. Brooklyn is divided into 2 clusters, and the Bronx has too few rides to be separated from Harlem.
#
# Let's plot the cluster centers:
#%%
fig,ax = plt.subplots(figsize = (10,10))
for label in km_df.label.unique():
ax.plot(km_df.longitude[km_df.label == label],km_df.latitude[km_df.label == label],'.', alpha = 0.4, markersize = 0.1, color = 'gray')
ax.plot(kmeans.cluster_centers_[label,0],kmeans.cluster_centers_[label,1],'o', color = 'r')
ax.annotate(label, (kmeans.cluster_centers_[label,0],kmeans.cluster_centers_[label,1]), color = 'b', fontsize = 20)
ax.set_title('Center of Clusters')
plt.show()
#%% [markdown]
# ### Taxi rides from one cluster to another
#
# And the following animation, every arrow represents rides from one cluster to another. The width of the arrow is proportional to the relative amount of trips in the relevant hour.
#%%
df_train['pickup_cluster'] = kmeans.predict(df_train[['pickup_longitude','pickup_latitude']])
df_train['dropoff_cluster'] = kmeans.predict(df_train[['dropoff_longitude','dropoff_latitude']])
# pickup_datetime was already converted to datetime above, so take the hour
# directly (parser.parse expects a string and would fail on Timestamp values)
df_train['pickup_hour'] = df_train.pickup_datetime.dt.hour
clusters = | pd.DataFrame() | pandas.DataFrame |
import pytest
import pandas as pd
from getdera import utils
from pandas.testing import assert_frame_equal
from getdera.dera import process
# TESTCASES
TEST_DATA_PATH = 'getdera/tests/data'
TESTCASES = {
'process_tag': [
{'args': (f'{TEST_DATA_PATH}',
'risk',
'tag',
'01-01-2020',
'01-03-2020'),
'expected': pd.DataFrame({
'tag': ['AcquiredFundFeesAndExpensesBasedOnEstimates',
'AmendmentFlag',
'AmendmentFlag',
'AnnualFundOperatingExpensesTableTextBlock',
'AnnualReturn2006'],
'version': ['rr/2012',
'dei/2012',
'dei/2014',
'rr/2012',
'rr/2012'],
'dummy_value': ['lorem2020q1',
'ipsum2020q1',
'dolor2020q1',
'sit2020q1',
'amet2020q1']}).set_index(['tag', 'version'])}
],
'process_sub': [
{'args': (f'{TEST_DATA_PATH}',
'risk',
'sub',
'01-10-2019',
'15-12-2019'),
'expected': pd.DataFrame({
'adsh': ['0000000001-01-000001',
'0000000001-01-000002',
'0000000001-01-000003'],
'dummy_val': ['lorem2019q4',
None,
'dolor2019q4']}).set_index('adsh')}
],
'process': [
{'args': (f'{TEST_DATA_PATH}',
'risk',
'txt',
'01-07-2019',
'15-12-2019'),
'expected': pd.DataFrame({
'adsh': ['0000000001-01-000001',
'0000000001-01-000002',
'0000000001-01-000003']*2,
'dummy_val': ['lorem2019q3', None, 'ipsum2019q3',
'lorem2019q4', None, 'ipsum2019q4']})},
{'args': (f'{TEST_DATA_PATH}',
'risk',
'txt',
'01-10-2019',
'15-01-2020'),
'expected': pd.DataFrame({
'adsh': ['0000000001-01-000001',
'0000000001-01-000002',
'0000000001-01-000003']*2,
'dummy_val': ['lorem2020q1', None, 'ipsum2020q1',
'lorem2019q4', None, 'ipsum2019q4']})}
],
}
# FIXTURES
@pytest.fixture(scope='function', params=TESTCASES['process_tag'])
def process_tag_params(request, tmp_data_directory):
args = request.param['args']
expected = request.param['expected']
return args, expected
@pytest.fixture(scope='function', params=TESTCASES['process_sub'])
def process_sub_params(request, tmp_data_directory):
args = request.param['args']
expected = request.param['expected']
return args, expected
@pytest.fixture(scope='function', params=TESTCASES['process'])
def process_params(request, tmp_data_directory):
args = request.param['args']
expected = request.param['expected']
return args, expected
# UNIT TESTS
def test_process_tag(process_tag_params):
result = process(*process_tag_params[0])
expected = process_tag_params[1]
| assert_frame_equal(result, expected) | pandas.testing.assert_frame_equal |
#!/usr/bin/env python
# coding: utf-8
# # day6 homework
# Created by: 松島亮輔
# Task: predict housing sale prices
# 課題:住宅販売価格を予測する
# In[1]:
get_ipython().run_line_magic('matplotlib', 'inline')
# setting to render plots inside the notebook
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn.decomposition import PCA  # principal component analysis (PCA) library
from sklearn.metrics import mean_squared_error, mean_absolute_error
from IPython.display import display
import seaborn as sns
from scipy.stats import norm
from sklearn.linear_model import LinearRegression  # linear regression library
import math
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
# In[2]:
df_data = pd.read_csv("../input/kc_house_data.csv")
df_data["price"] = df_data["price"] / 10**6  # convert the unit to millions of dollars
print(df_data.columns)
print(df_data.columns)
display(df_data.head())
display(df_data.tail())
# In[3]:
ex_ver = ["bedrooms","bathrooms","sqft_living","grade","sqft_above","sqft_living15"]
# Split the data into train and test sets
# In[5]:
X, y = df_data.iloc[:,1:], df_data["price"]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.01, random_state=0)
display(X_train)
display(X_test)
display(y_train)
display(y_test)
# In[6]:
df_data = X_train
# Analyze the following explanatory variables
# In[7]:
for ver in ex_ver:
sns.jointplot(x=ver, y="price", data=df_data,kind = 'reg', size = 10)
plt.show()
# In[25]:
coord_df_data = pd.DataFrame([])
PCA_data = pd.DataFrame([])
for ver in ex_ver:
X = np.array(df_data[[ver,"price"]])
    pca = PCA(n_components=2)  # create the PCA object; n_components sets the number of dimensions kept (the data here is 2-dimensional, so 3 or more raises an error)
    pca.fit(X)  # run the principal component analysis
Y = np.dot((X), pca.components_.T)
PCA_data[ver] = [pca.components_]
dataframe_value = | pd.DataFrame(Y) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import sys
# import io
from collections import OrderedDict
from tabulate import tabulate
import decimal
from decimal import Decimal
import itertools
import numbers
import string
import numpy as np
from scipy import stats
import pandas as pd
#import seaborn as sns
import matplotlib as mpl
import matplotlib.pyplot as plt
###############################################################################
def isNumber(number):
#import numbers
try:
number = float(number)
if isinstance(number, numbers.Real):
return(True)
else:
return(False)
except:
return(False)
def factorial(x):
#import decimal
# calculate the factorial of a number
if not isNumber(x):
raise ValueError("factorial: Received invalid number.")
if (x < 0):
raise ValueError("factorial: Cannot calculate factorial of negative numbers!")
if (x == 0) or (x == 1):
return(1)
else:
result = 1
for i in range(2, x + 1):
i = decimal.Decimal(str(i))
try:
result = (result * i)
except decimal.Overflow as e:
raise ValueError("Number too big, internal error (OVERFLOW)")
except Exception as e:
raise ValueError("%s"% (ascii(e),))
return(int(result))
def comb_nonrep(n, k):
#import itertools
# calculate the number of combinations without repetitions
    # for "n" objects divided into groups of "k" objects
if (n < 0) or (k < 0):
raise ValueError("received invalid negative number.")
nom = factorial(n)
denom = (factorial(k) * factorial(n - k))
result = int(nom / denom)
return(result)
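# Small hand-checked examples of the formula above, n! / (k! * (n - k)!)
# (shown doctest-style):
#
# >>> comb_nonrep(5, 2)
# 10
# >>> comb_nonrep(6, 3)
# 20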
def sign(number):
#import numbers
try:
number = float(number)
except Exception:
raise ValueError("ERROR: string %r is not a valid number." % (number,))
# check if supposed number is number
if (isinstance(number, numbers.Real)):
if (number > 0):
return(1)
elif (number == 0):
return(0)
elif (number < 0):
return(-1)
else:
raise ValueError("ERROR: unexpected error while evaluating %r" % (number, ))
def mad(arr):
# MAD is the Median Absolute Deviation:
    # http://en.wikipedia.org/wiki/Median_absolute_deviation
med = np.median(arr)
mad = np.median(np.abs(arr - med))
return mad
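# Hand-checked example of the definition above (doctest-style): the median
# of [1, 2, 3, 4, 100] is 3, the absolute deviations are [2, 1, 0, 1, 97],
# and their median is 1.
#
# >>> mad(np.array([1, 2, 3, 4, 100]))
# 1.0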
def xlog(number, base=None, round_at=None, option=None):
# http://stats.stackexchange.com/questions/1444/how-should-i-transform-non-negative-data-including-zeros
# http://robjhyndman.com/hyndsight/transformations/
#import numbers
#from decimal import Decimal
options = ["int", "sign", "shift:"]
if option:
option = str(option)
# check if supposed number is number
if not isNumber(number):
raise ValueError("number '%s' is not a valid real number." % (ascii(number),))
if option:
if (number < 0) and not ("sign" in option):
raise ValueError("cannot calculate logarithm of a negative number.")
else:
if (number < 0):
raise ValueError("cannot calculate logarithm of a negative number.")
if base:
if (base == 'e'):
pass
else:
if not isNumber(number):
raise ValueError("invalid base value '%s'" % (ascii(base),))
if (base == 1) and (number == 1):
raise ValueError("calculation of log in base '1' of number '1' is not possible.")
if (base == 0):
raise ValueError("calculation of log in base 0 is not possible.")
if (base < 0):
raise ValueError("calculation of log with a negative base is not possible.")
if (base == number):
return(1)
if option:
if (option == "sign") and (option in options):
nsign = sign(number)
number = abs(Decimal(str(number)))
elif ("shift:" in option):
sopt= option.split(":")
if not (len(sopt) == 2):
raise ValueError("invalid option '%s'" % (ascii(option),))
shift = sopt[1]
if not isNumber(shift):
raise ValueError("invalid shift value %s in option %s" % (ascii(shift), ascii(option)))
shift = float(shift)
number = number + shift
if not (isinstance(shift, numbers.Real)):
raise ValueError("shift %s is not a valid real number." % (ascii(shift),))
if (shift <= 0):
raise ValueError("shift can only be a positive integer value.")
if base:
if (base == "e"):
result = Decimal(str(number)).ln()
elif (base == 10):
result = Decimal(str(number)).log10()
else:
base = float(base)
result = (Decimal(str(number)).ln() / Decimal(str(base)).ln())
else:
result = Decimal(str(number)).log10()
if (option == "sign") and (option in options):
result = (nsign * result)
if round_at:
if not isNumber(round_at):
raise ValueError("rounding precision '%s' is not a valid real number." % (ascii(round_at),))
precision = int(Decimal(str(round_at)))
if (precision < 0):
precision = 0
result = round(result, precision)
if (option == "int") and (option in options):
result = round(result, 0)
if result.is_zero():
result = 0
result = float(result)
return result
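# A few hand-checked examples of the options above (doctest-style): plain
# base-10 log, an explicit base, and the "sign" option, which keeps the sign
# of a negative input while taking the log of its absolute value.
#
# >>> xlog(1000)
# 3.0
# >>> xlog(8, base=2)
# 3.0
# >>> xlog(-1000, option="sign")
# -3.0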
def xarccosh(number):
# https://en.wikipedia.org/wiki/Inverse_hyperbolic_function
# http://www.cs.washington.edu/research/projects/uns/F9/src/boost-1.39.0/libs/math/doc/sf_and_dist/html/math_toolkit/special/inv_hyper/acosh.html
#from decimal import Decimal
    # arccosh(185) = 5.913495700956333819331535103687269835462231550162396896624 - WOLFRAM ALPHA OK
# calculated using Decimal -> 5.913495700956333819331535104 - OK
if not isNumber(number):
raise ValueError(" number '%s' is not a valid real number." % (ascii(number),))
if (number < 1):
raise ValueError(" cannot calculate arccosh of a number smaller than 1.")
    # calculate INVERSE HYPERBOLIC COSINE (arccosh)
# arccosh = ln(x + ( sqrt( (x ** 2) - 1) ) )
number = Decimal(str(number))
nsqrt = Decimal(xsqrt((number ** Decimal("2")) - Decimal("1")))
operation = number + nsqrt
result = Decimal(operation).ln()
return float(result)
def xarcsinh(number):
#from decimal import Decimal
# https://en.wikipedia.org/wiki/Inverse_hyperbolic_function
# http://worthwhile.typepad.com/worthwhile_canadian_initi/2011/07/a-rant-on-inverse-hyperbolic-sine-transformations.html
# arcsinh(185) = 5.913510310160134810673581720 - WOLFRAM ALPHA OK
# calculated using Decimal -> 5.913510310160134810673581720 - OK
if not isNumber(number):
raise ValueError(" number '%s' is not a valid real number." % (ascii(number),))
if (number < 1):
raise ValueError(" cannot calculate arcsinh of a number smaller than 1.")
    # calculate INVERSE HYPERBOLIC SINE (arcsinh)
# arcsinh = ln(x + ( sqrt( (x ** 2) + 1) ) )
number = Decimal(str(number))
nsqrt = Decimal(xsqrt((number**Decimal("2")) + Decimal("1")))
operation = number + nsqrt
result = Decimal(operation).ln()
return float(result)
def xarctanh(number):
# https://en.wikipedia.org/wiki/Inverse_hyperbolic_function
# http://www.cs.washington.edu/research/projects/uns/F9/src/boost-1.39.0/libs/math/doc/sf_and_dist/html/math_toolkit/special/inv_hyper/atanh.html
#from decimal import Decimal
    # arctanh(0.5) = 0.5493061443340548456976226184 - WOLFRAM ALPHA OK
# calculated using Decimal -> 0.5493061443340548456976226185 - OK
if not isNumber(number):
raise ValueError(" number '%s' is not a valid real number." % (ascii(number),))
if (number >= 1):
raise ValueError(" cannot calculate arctanh of a number equal or greater then 1.")
if (number <= -1):
raise ValueError(" cannot calculate arctanh of a number equal or smaller then -1.")
    # calculate INVERSE HYPERBOLIC TANGENT (arctanh)
    # arctanh = (ln((1 + x) / (1 - x)) / 2)
number = Decimal(str(number))
operation = ((Decimal("1") + number) / (Decimal("1") - number))
result = (operation.ln() / Decimal("2"))
return float(result)
def xcbrt(number):
#from decimal import Decimal
if not isNumber(number):
raise ValueError(" number '%s' is not a valid real number." % (ascii(number),))
# calculate CUBIC ROOT
number = Decimal(str(number))
result = (number ** (Decimal("1") / Decimal("3")))
return float(result)
def xsqrt(number):
#from decimal import Decimal
if not isNumber(number):
raise ValueError(" number '%s' is not a valid real number." % (ascii(number),))
    # calculate SQUARE ROOT
number = Decimal(str(number))
result = (number ** (Decimal("1") / Decimal("2")))
return float(result)
def bonferroni_adj(alpha, tests):
# Bonferroni adjustment = (significance level / number of tests)
#from decimal import Decimal
if not isNumber(alpha):
raise ValueError(" number '%s' is not a valid real number." % (ascii(alpha),))
if not isNumber(tests):
raise ValueError(" number '%s' is not a valid real number." % (ascii(tests),))
if (alpha < 0):
raise ValueError("received invalid probability score (p < 0).")
if (alpha > 1):
raise ValueError("received invalid probability score (p > 1).")
if (alpha < 0) or (tests < 0):
raise ValueError("received invalid negative number.")
if (alpha == 0) or (tests == 0):
raise ValueError("received parameter as zero.")
# calculate value for bonferroni adjustment
alpha = Decimal(str(alpha))
tests = Decimal(str(tests))
badj = (alpha / tests)
return badj
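# Worked example of the adjustment above: testing 10 hypotheses at an overall
# significance level of 0.05 gives a per-test threshold of 0.05 / 10 = 0.005.
# Note that the function returns a Decimal rather than a float.
#
# >>> float(bonferroni_adj(0.05, 10))
# 0.005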
def isprintable(data_string):
#import string
printset = set(string.printable)
isprintable = set(data_string).issubset(printset)
return isprintable
def descriptive(nparray, selection):
if (len(nparray) < 1):
raise ValueError("ERROR: data array not valid.")
if selection:
selection = str(selection).lower()
else:
raise ValueError("ERROR: selection variable missing.")
# allocate ordered dictionary
diststats = OrderedDict()
# get data columns
columns = nparray.columns.values
# create header list
headers = list()
for name in columns:
headers.append(name)
del(columns)
if (selection == "df"):
for name in headers:
# get column type
dtype = str(nparray[name].dtypes)
if (("float" in dtype) or ("int" in dtype)):
dtype = "numerical (%s)" % (dtype,)
elif ("catego" in dtype):
dtype = "categorical"
# get total items in the columns
dtotal = float(nparray[name].shape[0])
# get total items of numeric type (excludes NaN, nulls, strings, etc..)
dvalid = float(nparray[name].count())
# get MISSING or NOT NUMERIC items (NaN, nulls, strings, etc..)
dnulls = float(nparray[name].isnull().sum())
# numeric percentage
dvalidp = float((dvalid/dtotal) * 100)
# missing percentage
dnullsp = float((dnulls/dtotal) * 100)
# prepare formatted list
dstring = [name, dtype, dtotal, dvalid, dvalidp, dnulls, dnullsp]
# add list to ordered dictionary
diststats[name] = dstring
tab_headers = ["Column", "Type", "Elements", "Valid", "Valid (%)",
"Missing", "Missing (%)"]
elif (selection == "num"):
for name in headers:
# get column type
dtype = str(nparray[name].dtypes)
if (("float" in dtype) or ("int" in dtype)):
# get total items in the columns
dtotal = float(nparray[name].shape[0])
dmin = float(nparray[name].min())
dmax = float(nparray[name].max())
drange = float(dmax - dmin)
dmean = float(nparray[name].mean())
dmedian = float(nparray[name].median())
dmode = float(nparray[name].mode())
# mean absolute deviation
dmad = float(nparray[name].mad())
# prepare formatted list
dstats = [name, dtotal, dmin, dmean, dmedian, dmode, dmax, drange, dmad]
# add list to ordered dictionary
diststats[name] = dstats
tab_headers = ["Variable", "count", "min", "mean", "median",
"mode", "max", "range","MAD"]
elif (selection == "dist"):
for name in headers:
# get column type
dtype = str(nparray[name].dtypes)
if (("float" in dtype) or ("int" in dtype)):
# get total items in the columns
dtotal = float(nparray[name].shape[0])
dstd = float(nparray[name].std())
dq1 = float(nparray[name].quantile(0.25))
dq2 = float(nparray[name].quantile(0.5))
dq3 = float(nparray[name].quantile(0.75))
dIQR = (dq3 - dq1)
dvar = float(nparray[name].var())
dskew = float(nparray[name].skew())
dkurt = float(nparray[name].kurtosis())
# prepare formatted list
ddistr = [name, dtotal, dstd, dq1, dq2, dq3, dIQR, dvar, dskew, dkurt]
# add list to ordered dictionary
diststats[name] = ddistr
tab_headers = ["Variable", "count", "stdev(n-1)", "25%", "50%",
"75%", "IQR", "variance", "skewness", "kurtosis"]
# prepare tabulation lines
tablines = list()
for k,v in diststats.items():
tablines.append(v)
# create tabulate
strtab = str(tabulate(tablines, headers=tab_headers, tablefmt='grid'))
return(strtab)
def manualbin(nparray, minval=None, maxval=None, numbins=None, binning=False):
# check start value is number
if not isNumber(minval):
minval = np.min(nparray)
if (minval > np.min(nparray)):
minval = np.min(nparray)
# check end value is number
if not isNumber(maxval):
maxval = np.max(nparray)
if (maxval < np.max(nparray)):
maxval = np.max(nparray)
if (maxval == minval):
minval = np.min(nparray)
maxval = np.max(nparray)
# sort the array in ASCENDING order
nparray = np.sort(nparray)
# check minimum array length
if (len(nparray) < 4):
return 1, minval, maxval
else:
if (isNumber(numbins) and (numbins > 0)):
# calculate bin size as np float
binsize = ((maxval - minval) / numbins)
# generate the bins of size binsize from "minval" to "maxval"
npbins = np.arange(minval, maxval, binsize)
#print(minval)
#print(maxval)
#print(numbins)
#print(binsize)
#print(npbins)
#print(ascii(binning))
if binning:
# for each element in array, get bin number of value in i-th index
# Output array of indices, of same shape as x.
binned = np.digitize(nparray, npbins)
return binsize, minval, maxval, numbins, binned
else:
return binsize, minval, maxval, numbins
else:
raise ValueError("ERROR: value for number of bins null or invalid.")
def freqency_table(nparray, numbins, minval=None, maxval=None, binning=True):
# check start value is number
if not isNumber(minval):
minval = np.min(nparray)
if (minval > np.min(nparray)):
minval = np.min(nparray)
# check end value is number
if not isNumber(maxval):
maxval = np.max(nparray)
if (maxval < np.max(nparray)):
maxval = np.max(nparray)
# check range make sense
if (maxval == minval):
minval = np.min(nparray)
maxval = np.max(nparray)
# check number of bins is correct
if not isNumber(numbins):
numbins = int(numbins)
# get total number of elements
#tot_elem = data[].shape[0]
tot_elem = len(nparray)
# sort the array in ASCENDING order
nparray = np.sort(nparray)
# get the binnig
binsize, minval, maxval, numbins, binned = manualbin(nparray, minval, maxval, numbins=numbins, binning=True)
# generate the bins of size binsize from "minval" to "maxval"
npbins = np.arange(minval, maxval, binsize)
# get how many elements per interval
unique, counts = np.unique(binned, return_counts=True)
bincount = OrderedDict(zip(unique, counts))
# create list for each interval range
headbin = list()
nbcount = 0
for num in npbins:
nbcount = nbcount + 1
imin = npbins[(nbcount - 1)]
if (nbcount < numbins):
imax = npbins[(nbcount)]
elif (nbcount == numbins):
imax = maxval
else:
raise ValueError()
headbin.append([nbcount, imin, imax])
del(npbins)
# add bin count to each list
for pos, val in bincount.items():
for elem in headbin:
if (elem[0] == pos):
elem.append(val)
# add zero to any interval with no items
for pos, val in bincount.items():
for elem in headbin:
if (len(elem) == 3):
elem.append(0)
del(bincount)
ftable = list()
tot_freq = 0
tot_freqp = 0
for inter in headbin:
# set interval headers
if (inter[0] < numbins):
interval = "[%s <-> %s)" % (inter[1], inter[2])
else:
interval = "[%s <-> %s]" % (inter[1], inter[2])
# frequency
freq = inter[3]
# frequency percentage
freqp = ((freq / float(tot_elem)) * 100)
# cumulative frequency
tot_freq = tot_freq + freq
# cumulative frequency percentage
tot_freqp = ((tot_freq / float(tot_elem)) * 100)
# set printable list
dstring =[interval, freq, freqp, tot_freq, tot_freqp]
ftable.append(dstring)
freq_headers = ["Interval", "Frequency", "Frequency (%)", "Cumulative Freq.", "Cumulative Freq. (%)"]
# create tabulate
strtab = (str(tabulate(ftable, headers=freq_headers, tablefmt='orgtbl')) + "\n")
return(strtab)
def cm2inch(*tupl):
inch = 2.54
if isinstance(tupl[0], tuple):
return tuple(i/inch for i in tupl[0])
else:
return tuple(i/inch for i in tupl)
def mm2inch(*tupl):
inch = 25.4
if isinstance(tupl[0], tuple):
return tuple(i/inch for i in tupl[0])
else:
return tuple(i/inch for i in tupl)
def uniquevals(nparray):
# create temp dictionary to store unique value count
tvalcount = dict()
# count unique values
for item in nparray:
# save unique diametrer values
tvalcount[item] = tvalcount.get(item, 0) + 1
# sort temp dictionary by key (that is numeric) and store in ordered dict
valcount = OrderedDict(sorted(tvalcount.items()))
# delete temp dictionary
del(tvalcount)
# return counts
return valcount
def splitdict(dictionary):
# splits a dictionary into 2 nparray, one for K, one for V
listA = list()
listB = list()
for key, value in dictionary.items():
listA.append(float(key))
listB.append(float(value))
arrA = np.array(listA)
arrB = np.array(listB)
del(listA)
del(listB)
return arrA, arrB
def val_shift(nparray, shift):
listA = list()
for value in nparray:
listA.append(value + shift)
arrA = np.array(listA)
del(listA)
return arrA
def axis_major_ticks(nparray):
# get numeric range
valrange = (max(nparray) - min(nparray))
# get 1% of the range
onep = (valrange / 100.0)
# set interval to use as spacer before min and after max (default 3% of range)
spacer = (onep * 3)
# set tick interval to 10%
tickint = (onep * 10)
# get array minimum value
amin = min(nparray)
# get array maximum value
amax = max(nparray)
# set lower range
lowrng = (amin - spacer)
# set higher range
higrng = (amax + spacer)
# set minimum ticker value
mintick = amin
# set maximum ticker value + 1% (otherwise max value is NOT visible)
maxtick = (amax + (onep))
# calculate all values to use as tickers withing defined range
# to see the end value we set max value as (max + value equal to 1% of range)
major_ticks = np.arange(mintick, maxtick, tickint)
# return all elements
return lowrng, higrng, major_ticks, amin, amax
def chi2_testtype(variable1, variable2, dataframe):
#check that variables are categorical
v1dtype = str(dataframe[variable1].dtypes)
if (v1dtype == 'category'):
v1dtypes = "PASS"
else:
v1dtypes = "FAIL"
v2dtype = str(dataframe[variable2].dtypes)
if (v2dtype == 'category'):
v2dtypes = "PASS"
else:
v2dtypes = "FAIL"
# print verbose results
print("TEST --> Check that both variable are 'CATEGORICAL'")
print("Variable: '%s' - Type: '%s' (%s)" % (variable1, v1dtype, v1dtypes))
print("Variable: '%s' - Type: '%s' (%s)" % (variable2, v2dtype, v2dtypes))
if (v1dtypes == 'FAIL') or (v2dtypes == 'FAIL'):
print("Status --> FAIL - Not all variables are categorical, quitting.")
raise ValueError("Not all variables are categorical.")
else:
print("Status --> PASS - All variables are categorical.\n")
def chi2_testlen(variable1, variable2, dataframe):
#check that data series have at least 2 groups
v1uq = uniquevals(dataframe[variable1])
v1len = len(v1uq)
v1names = list()
for k, v in v1uq.items():
v1names.append(str(k))
if (v1len >= 2):
v1lens = "PASS"
else:
v1lens = "FAIL"
v2uq = uniquevals(dataframe[variable2])
v2len = len(v2uq)
v2names = list()
for k, v in v2uq.items():
v2names.append(str(k))
if (v2len >= 2):
v2lens = "PASS"
else:
v2lens = "FAIL"
# print verbose results
print("TEST --> Check that each variable has at least 2 data groups")
print("Variable: '%s' - Groups: '%s' - Names: %s (%s)" % (variable1, v1len, v1names, v1lens))
print("Variable: '%s' - Groups: '%s' - Names: %s (%s)" % (variable2, v2len, v2names, v2lens))
if (v1lens == 'FAIL') or (v2lens == 'FAIL'):
print("Status--> FAIL - Not all variables have at least 2 data groups, quitting.")
raise ValueError("Not all variables have at least 2 data groups.")
else:
print("Status --> PASS - All variables have at least 2 data groups.\n")
def chi2_df(variable1, variable2, dataframe):
v1uq = uniquevals(dataframe[variable1])
v1len = len(v1uq)
v2uq = uniquevals(dataframe[variable2])
v2len = len(v2uq)
# chi2 degrees of freedom
# r is number of rows, c is number of columns
# ((r -1) * (c - 1))
df = ((v2len - 1) * (v1len - 1))
return int(df)
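# Worked example (table size assumed): for a crosstab with 3 row groups and
# 4 column groups the degrees of freedom are ((4 - 1) * (3 - 1)) = 6, which is
# what chi2_df returns for two categorical series with 3 and 4 levels.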
def chi2_pdev(observed, expected, round_at=None):
if round_at:
if not isNumber(round_at):
raise ValueError("chi2 % dev - rounding precision '%s' is not a valid real number." % (ascii(round_at),))
precision = int(Decimal(str(round_at)))
if (precision < 0):
precision = 0
# calculate percentage deviation
# (((Observed - Expected) / Expected) * 100)
observed = Decimal(str(observed))
expected = Decimal(str(expected))
residual = (observed - expected)
val_pdev = ((residual / expected) * 100)
if round_at:
val_pdev = round(val_pdev, precision)
return float(val_pdev)
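# Illustrative sketch (counts assumed): with observed=30 and expected=25 the
# percentage deviation is (((30 - 25) / 25) * 100) = 20.0:
#   chi2_pdev(30, 25, round_at=2) -> 20.0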
def chi2_stdres(observed, expected, round_at=None):
if round_at:
if not isNumber(round_at):
raise ValueError("chi2 stdev - rounding precision '%s' is not a valid real number." % (ascii(round_at),))
precision = int(Decimal(str(round_at)))
if (precision < 0):
precision = 0
# calculate standardised (Pearson) residual
# ((Observed - Expected) / sqrt(Expected))
observed = Decimal(str(observed))
expected = Decimal(str(expected))
residual = (observed - expected)
sqrtexp = xsqrt(expected)
val_stdres = (residual / Decimal(str(sqrtexp)))
if round_at:
val_stdres = round(val_stdres, precision)
return float(val_stdres)
def chi2_adjusted_residual(observed, expected, rowtot, coltot, total, round_at=None):
if round_at:
if not isNumber(round_at):
raise ValueError("chi2 stdev - rounding precision '%s' is not a valid real number." % (ascii(round_at),))
precision = int(Decimal(str(round_at)))
if (precision < 0):
precision = 0
# calculate adjusted residual
# numerator = (observed - expected)
# row proportion = ( row marginal total / grand total)
# column proportion = ( column marginal total / grand total)
# denominator = sqrt(expected * (1-row proportion) * (1-column proportion))
# adjusted residual = (numerator / denominator)
observed = Decimal(str(observed))
expected = Decimal(str(expected))
rowtot = Decimal(str(rowtot))
coltot = Decimal(str(coltot))
total = Decimal(str(total))
residual = (observed - expected)
denominator = xsqrt(expected * (1 - (rowtot / total)) * (1 - (coltot / total)))
denominator = Decimal(str(denominator))
val_adjres = (residual / denominator)
if round_at:
val_adjres = round(val_adjres, precision)
return float(val_adjres)
def chi2_contrib(observed, expected, round_at=None):
if round_at:
if not isNumber(round_at):
raise ValueError("chi2 contrib - rounding precision '%s' is not a valid real number." % (ascii(round_at),))
precision = int(Decimal(str(round_at)))
if (precision < 0):
precision = 0
    # calculate chi squared cell contribution
    # (((Observed - Expected) ** 2) / Expected)
observed = Decimal(str(observed))
expected = Decimal(str(expected))
residual = (observed - expected)
val_contrib = ((residual ** 2) / expected)
if round_at:
val_contrib = round(val_contrib, precision)
return float(val_contrib)
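# Illustrative sketch (counts assumed): the cell contribution for observed=30,
# expected=25 is (((30 - 25) ** 2) / 25) = 1.0; summing chi2_contrib over every
# cell of the contingency table yields the Pearson chi squared statistic:
#   chi2_contrib(30, 25, round_at=3) -> 1.0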
def chi2_pdev_yates(observed, expected, round_at=None):
if round_at:
if not isNumber(round_at):
raise ValueError("chi2 % dev - Yates - rounding precision '%s' is not a valid real number." % (ascii(round_at),))
precision = int(Decimal(str(round_at)))
if (precision < 0):
precision = 0
    # calculate percentage deviation with Yates correction
    # (((abs(Observed - Expected) - 0.5) / Expected) * 100)
observed = Decimal(str(observed))
expected = Decimal(str(expected))
residual = (abs(observed - expected) - Decimal(str(0.5)))
val_pdev = ((residual / expected) * 100)
if round_at:
val_pdev = round(val_pdev, precision)
    if (observed > expected):
        val_pdev = (val_pdev * 1)
    elif (observed < expected):
        val_pdev = (val_pdev * -1)
return float(val_pdev)
def chi2_stdres_yates(observed, expected, round_at=None):
if round_at:
if not isNumber(round_at):
raise ValueError("chi2 stdev - Yates - rounding precision '%s' is not a valid real number." % (ascii(round_at),))
precision = int(Decimal(str(round_at)))
if (precision < 0):
precision = 0
    # calculate standardised (Pearson) residual with Yates correction
    # ((abs(Observed - Expected) - 0.5) / sqrt(Expected))
observed = Decimal(str(observed))
expected = Decimal(str(expected))
residual = (abs(observed - expected) - Decimal(str(0.5)))
sqrtexp = xsqrt(expected)
val_stdres = (residual / Decimal(str(sqrtexp)))
if round_at:
val_stdres = round(val_stdres, precision)
    if (observed > expected):
        val_stdres = (val_stdres * 1)
    elif (observed < expected):
        val_stdres = (val_stdres * -1)
return float(val_stdres)
def chi2_contrib_yates(observed, expected, round_at=None):
    # Yates continuity correction: residual = (abs(Observed - Expected) - 0.5)
    # cell contribution = ((residual ** 2) / Expected)
if round_at:
if not isNumber(round_at):
raise ValueError("chi2 contrib - Yates - rounding precision '%s' is not a valid real number." % (ascii(round_at),))
precision = int(Decimal(str(round_at)))
if (precision < 0):
precision = 0
    # calculate chi2 cell contribution with Yates correction
    # (((abs(Observed - Expected) - 0.5) ** 2) / Expected)
observed = Decimal(str(observed))
expected = Decimal(str(expected))
residual = (abs(observed - expected) - Decimal(str(0.5)))
val_contrib = ((residual ** 2) / expected)
if round_at:
val_contrib = round(val_contrib, precision)
return float(val_contrib)
def phi_coefficient(chi2, totalelem, round_at=None):
if round_at:
if not isNumber(round_at):
raise ValueError("Phi coeff - rounding precision '%s' is not a valid real number." % (ascii(round_at),))
precision = int(Decimal(str(round_at)))
if (precision < 0):
precision = 0
# phi coefficient
# sqrt(chi2 / total elements)
# to use ONLY with 2x2 tables
chi2 = Decimal(str(chi2))
totalelem = Decimal(str(totalelem))
phi = xsqrt(chi2 / totalelem)
if round_at:
phi = round(phi, precision)
phi = float(phi)
# Phi coefficient Interpretation
# Using table of Rea & Parker (1992)
# http://files.eric.ed.gov/fulltext/EJ955682.pdf
# https://c.ymcdn.com/sites/aisnet.org/resource/group/3f1cd2cf-a29b-4822-8581-7b1360e30c71/Spring_2003/kotrlikwilliamsspring2003.pdf
# .00 and under .10 --> Negligible association
# .10 and under .20 --> Weak association
# .20 and under .40 --> Moderate association
# .40 and under .60 --> Relatively strong association
# .60 and under .80 --> Strong association
# .80 and under 1.00 --> Very strong association
interpretation = ""
if (phi >= 0) and (phi < 0.1):
interpretation = ("Negligible association")
elif (phi >= 0.1) and (phi < 0.2):
interpretation = ("Weak association")
    elif (phi >= 0.2) and (phi < 0.4):
        interpretation = ("Moderate association")
    elif (phi >= 0.4) and (phi < 0.6):
        interpretation = ("Relatively strong association")
    elif (phi >= 0.6) and (phi < 0.8):
        interpretation = ("Strong association")
    elif (phi >= 0.8) and (phi < 1):
        interpretation = ("Very strong association")
elif (phi == 1):
interpretation = ("Perfect match")
final = "%s (%s)" % (phi, interpretation)
return final
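# Illustrative sketch (values assumed): for a 2x2 table with chi2 = 3.84 and
# 100 observations, phi = sqrt(3.84 / 100) ~= 0.196, i.e. a weak association:
#   phi_coefficient(3.84, 100, round_at=3) -> '0.196 (Weak association)'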
def cramer_V(chi2, totalelem, minrowcol, round_at=None):
if round_at:
if not isNumber(round_at):
raise ValueError("Cramer's V - rounding precision '%s' is not a valid real number." % (ascii(round_at),))
precision = int(Decimal(str(round_at)))
if (precision < 0):
precision = 0
    # Cramer's V (V)
# t = minimum value between (number of rows - 1) and (number of columns - 1)
# sqrt(chi2 / (total elements * t))
# to use ONLY with rectangular tables
# only with tables having different number of rows and columns (3x4, 4x6, etc)
chi2 = Decimal(str(chi2))
totalelem = Decimal(str(totalelem))
minrowcol = Decimal(str(minrowcol))
t = (minrowcol - Decimal(str(1)))
cramer = xsqrt(chi2 / (totalelem * t))
if round_at:
cramer = round(cramer, precision)
cramer = float(cramer)
# Cramer’s Interpretation
# Using table of Rea & Parker (1992)
# https://c.ymcdn.com/sites/aisnet.org/resource/group/3f1cd2cf-a29b-4822-8581-7b1360e30c71/Spring_2003/kotrlikwilliamsspring2003.pdf
# http://files.eric.ed.gov/fulltext/EJ955682.pdf
# .00 and under .10 --> Negligible association
# .10 and under .20 --> Weak association
# .20 and under .40 --> Moderate association
# .40 and under .60 --> Relatively strong association
# .60 and under .80 --> Strong association
# .80 and under 1.00 --> Very strong association
interpretation = ""
if (cramer >= 0) and (cramer < 0.1):
interpretation = ("Negligible association")
elif (cramer >= 0.1) and (cramer < 0.2):
interpretation = ("Weak association")
    elif (cramer >= 0.2) and (cramer < 0.4):
        interpretation = ("Moderate association")
    elif (cramer >= 0.4) and (cramer < 0.6):
        interpretation = ("Relatively strong association")
    elif (cramer >= 0.6) and (cramer < 0.8):
        interpretation = ("Strong association")
    elif (cramer >= 0.8) and (cramer < 1):
        interpretation = ("Very strong association")
elif (cramer == 1):
interpretation = ("Perfect match")
final = "%s (%s)" % (cramer, interpretation)
return final
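# Illustrative sketch (values assumed): for a 3x4 table (min of rows/columns = 3)
# with chi2 = 12.5 and 200 observations, t = (3 - 1) and
# V = sqrt(12.5 / (200 * 2)) ~= 0.177, i.e. a weak association.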
def contingency_coeff(chi2, totalelem, round_at=None):
if round_at:
if not isNumber(round_at):
raise ValueError("Observed Contingency coeff - rounding precision '%s' is not a valid real number." % (ascii(round_at),))
precision = int(Decimal(str(round_at)))
if (precision < 0):
precision = 0
# PEARSON contingency coefficient C
# sqrt(chi2 / (chi2 + number total elements))
# to use ONLY with quadratic tables
# only with tables having same number of rows and columns (3x3, 4x4, etc)
chi2 = Decimal(str(chi2))
totalelem = Decimal(str(totalelem))
cC = xsqrt(chi2 / (chi2 + totalelem))
if round_at:
cC = round(cC, precision)
return float(cC)
def contingency_coeff_corr(chi2, totalelem, minrowcol, round_at=None):
if round_at:
if not isNumber(round_at):
raise ValueError("Observed Contingency Coeff Corrented - rounding precision '%s' is not a valid real number." % (ascii(round_at),))
precision = int(Decimal(str(round_at)))
if (precision < 0):
precision = 0
# PEARSON contingency coefficient C - corrected
# m = minimum value between (number of rows) and (number of columns)
# numerator = (chi2 * m)
# denominator = ((chi2 + number total elements) * (m - 1))
# corrected coefficient = sqrt(numerator / denominator)
# to use ONLY with quadratic tables
# only with tables having same number of rows and columns (3x3, 4x4, etc)
chi2 = Decimal(str(chi2))
totalelem = Decimal(str(totalelem))
minrowcol = Decimal(str(minrowcol))
nom = (chi2 * minrowcol)
denom = ((chi2 + totalelem) * (minrowcol - Decimal(str(1))))
cC_corr = xsqrt(nom / denom)
if round_at:
cC_corr = round(cC_corr, precision)
return float(cC_corr)
def standardized_contingency_coeff(obeserved_contingency_coeff, nrows, ncolumns, round_at=None):
if round_at:
if not isNumber(round_at):
raise ValueError("Observed Contingency Coeff Corrented - rounding precision '%s' is not a valid real number." % (ascii(round_at),))
precision = int(Decimal(str(round_at)))
if (precision < 0):
precision = 0
obeserved_contingency_coeff = Decimal(str(obeserved_contingency_coeff))
nrows = Decimal(str(nrows))
ncolumns = Decimal(str(ncolumns))
crows = ((nrows - Decimal(str(1))) / nrows)
ccols = ((ncolumns - Decimal(str(1))) / ncolumns)
# calculate contingency coefficient maximum value
cont_coeff_max = ((crows * ccols) ** (Decimal(str(1))/Decimal(str(4))))
# calculate standardized value
cont_coeff_std = (obeserved_contingency_coeff / cont_coeff_max)
if round_at:
cont_coeff_std = round(cont_coeff_std, precision)
cont_coeff_std = float(cont_coeff_std)
# Standardized Contingency Coefficient Interpretation
# Analyzing Quantitative Data: From Description to Explanation, By <NAME>, page 100
# https://books.google.fr/books?id=Tv_-YxqWVQ8C&printsec=frontcover#v=onepage&q&f=false
# .00 and under .01 --> No association
# .01 and under .10 --> Negligible association
# .10 and under .30 --> Weak association
# .30 and under .60 --> Moderate association
# .60 and under .75 --> Strong association
# .75 and under .99 --> Very strong association
# .99 and 1 --> Perfect association
interpretation = ""
if (cont_coeff_std >= 0) and (cont_coeff_std < 0.01):
interpretation = ("No association")
elif (cont_coeff_std >= 0.01) and (cont_coeff_std < 0.1):
interpretation = ("Negligible association")
elif (cont_coeff_std >= 0.10) and (cont_coeff_std < 0.30):
interpretation = ("Weak association")
elif (cont_coeff_std >= 0.30) and (cont_coeff_std < 0.60):
interpretation = ("Moderate association")
elif (cont_coeff_std >= 0.60) and (cont_coeff_std < 0.75):
interpretation = ("Strong association")
elif (cont_coeff_std >= 0.75) and (cont_coeff_std < 0.99):
interpretation = ("Very Strong association")
elif (cont_coeff_std >= 0.99) and (cont_coeff_std <= 1.0):
interpretation = ("Perfect association")
final = "%s (%s)" % (cont_coeff_std, interpretation)
return final
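# Illustrative sketch (values assumed): for a 3x3 table the maximum attainable
# contingency coefficient is (((2/3) * (2/3)) ** 0.25) ~= 0.816, so an observed
# C of 0.41 standardizes to about 0.41 / 0.816 ~= 0.50, a moderate association.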
def likelihood_ratio_contrib(observed, expected, round_at=None):
if round_at:
if not isNumber(round_at):
raise ValueError("likelihood ratio contrib - rounding precision '%s' is not a valid real number." % (ascii(round_at),))
precision = int(Decimal(str(round_at)))
if (precision < 0):
precision = 0
    # the method requires that cells with invalid (negative or zero) counts be
    # skipped, so we return zero for such cells and they add nothing to the sum
if (observed == 0) or (expected == 0) :
return 0
else:
        # calculate likelihood ratio (G) cell contribution
        # (Observed * ln(Observed / Expected))
observed = Decimal(str(observed))
expected = Decimal(str(expected))
ratio = float(observed / expected)
lratioc = (observed * xlog(ratio, base='e'))
if round_at:
lratioc = round(lratioc, precision)
return float(lratioc)
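# Illustrative sketch (counts assumed): the G-test contribution of a cell with
# observed=30 and expected=25 is 30 * ln(30 / 25) ~= 5.47; chi2_crosstab later
# doubles each contribution and sums them to obtain the likelihood ratio G.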
def chi2_expected_check(nrows, ncolumns, list_all_expected_values):
# set counter
exp_neg_vals = 0
exp_zero = 0
exp_gt0_lt1_vals = 0
exp_egt1_lt5_vals = 0
exp_egt5_lt10_vals = 0
exp_egt10_vals = 0
# parse list of expected count and check each value
for item in list_all_expected_values:
if (item < 0):
exp_neg_vals = exp_neg_vals + 1
elif (item == 0):
exp_zero = exp_zero + 1
elif (item > 0) and (item < 1):
exp_gt0_lt1_vals = exp_gt0_lt1_vals + 1
elif (item >= 1) and (item < 5):
exp_egt1_lt5_vals = exp_egt1_lt5_vals + 1
elif (item >= 5) and (item < 10):
exp_egt5_lt10_vals = exp_egt5_lt10_vals + 1
elif (item >= 10):
exp_egt10_vals = exp_egt10_vals + 1
else:
raise ValueError("Unexpected: malformed element in list of expected counts.")
total_values = len(list_all_expected_values)
test_logs = list()
test_fail_1 = False
test_fail_2 = False
test_fail_3 = False
data_isCompliant = True
# for all table sizes
test_header = "CHI SQUARED - Testing if all 'expected count' cells satisfy minimum requirements.\n"
test_logs.append(test_header)
    # Assumption Check 1: no expected count is negative
    test1_text = "Assumption --> No expected count has a negative value."
test_logs.append(test1_text)
test1_found = int(exp_neg_vals)
test1_foundp = ((test1_found / total_values) * 100)
test1_counts = "TEST: %s (%s%%) cells with an expected count less than 0." % (test1_found, test1_foundp)
test_logs.append(test1_counts)
if (test1_found == 0):
test_logs.append("Status --> PASS\n")
else:
test_fail_1 = True
test_logs.append("ERROR: Calculation will produce unrecoverable error 'Domain Error'\n")
test_logs.append("Status --> FAIL\n")
    # Assumption Check 2: no expected count equal to zero
test2_text = "Assumption --> No expected count has a value of zero."
test_logs.append(test2_text)
test2_found = int(exp_zero)
test2_foundp = ((test2_found / total_values) * 100)
test2_counts = "TEST: %s (%s%%) cells with an expected count equal to 0." % (test2_found, test2_foundp)
test_logs.append(test2_counts)
if (test2_found == 0):
test_logs.append("Status --> PASS\n")
else:
test_fail_2 = True
test_logs.append("ERROR: Calculation will produce unrecoverable error 'Division by zero'\n")
test_logs.append("Status --> FAIL\n")
    # Assumption Check 3: no expected count less than 1
test3_text = "Assumption --> No expected count has a value smaller than 1."
test_logs.append(test3_text)
test3_found = int(exp_gt0_lt1_vals)
test3_foundp = ((test3_found / total_values) * 100)
test3_counts = "TEST: %s (%s%%) cells with an expected count less than 1." % (test3_found, test3_foundp)
test_logs.append(test3_counts)
if (test3_found == 0):
test_logs.append("Status --> PASS\n")
else:
test_fail_3 = True
test_logs.append("ERROR: No reliable result can be produced with expected count less then 1.")
test_logs.append("Status --> FAIL\n")
# calculate 20% cutoff
cutoff_20 = (total_values * 0.2)
# for tables equal to 2x2
if (nrows == 2) and (ncolumns == 2):
        # Assumption Check 4: no expected count less than 5
# If any expected counts are less than 5, then some other test should be used
# Cochran (1952, 1954)
test4_text = "Assumption (for tables equal to 2x2) --> No expected count has a value smaller than 5."
test_logs.append(test4_text)
test4_found = (int(exp_gt0_lt1_vals) + int(exp_egt1_lt5_vals))
test4_foundp = ((test4_found / total_values) * 100)
test4_counts = "TEST: %s (%s%%) cells with an expected count less than 5." % (test4_found, test4_foundp)
test_logs.append(test4_counts)
if (test4_found == 0):
test_logs.append("Status --> PASS\n")
else:
test_logs.append("WARNING: If any expected counts are less than 5, then some other test should be used.")
test_logs.append("Status --> FAIL\n")
        # Assumption Check 5: all expected counts should be 10 or greater
test5_text = "Assumption (for tables equal to 2x2) --> No expected count has a value smaller than 10."
test_logs.append(test5_text)
test5_found = (int(exp_gt0_lt1_vals) + int(exp_egt1_lt5_vals) + int(exp_egt5_lt10_vals))
test5_foundp = ((test5_found / total_values) * 100)
test5_counts = "TEST: %s (%s%%) cells with an expected count less than 10." % (test5_found, test5_foundp)
test_logs.append(test5_counts)
if (test5_found < cutoff_20):
test_logs.append("Status --> PASS\n")
else:
test_logs.append("WARNING: All expected counts should be 10 or greater.")
test_logs.append("Status --> FAIL\n")
# for tables bigger than 2x2
else:
        # Assumption Check 6: no more than 20% of the expected counts are less than 5
# No more than 20% of the expected counts are less than 5
# (Yates, Moore & McCabe, 1999, p. 734).
test6_text = "Assumption (for tables bigger than 2x2) --> No more than 20% of the expected counts are less than 5."
test_logs.append(test6_text)
test6_found = (int(exp_gt0_lt1_vals) + int(exp_egt1_lt5_vals))
test6_foundp = ((test6_found / total_values) * 100)
test6_counts = "TEST - %s (%s%%) cells with an expected count less than 5." % (test6_found, test6_foundp)
test_logs.append(test6_counts)
if (test6_found < cutoff_20):
test_logs.append("Status --> PASS\n")
else:
test_logs.append("WARNING: More than 20% of the expected counts are less than 5, some other test should be used.")
test_logs.append("Status --> FAIL\n")
# report critical error if any of the first 3 test conditions are TRUE
if (test_fail_1 == True) or (test_fail_2 == True) or (test_fail_3 == True):
data_isCompliant = False
test_logs.append("DETECTED CRITICAL ERRORS:")
test_logs.append("Not possible to perform chi squared analysis as data does not meet basic requirements.")
test_logs.append("A change in data structure or data grouping is required.\n")
# return test results
if (data_isCompliant == False):
return ("FAIL", test_logs)
else:
return ("PASS", test_logs)
def chi2_crosstab(variable1, variable2, dataframe, alpha=None, round_at=None, verbose=None):
# set rounding precision if number is valid
if round_at:
if not isNumber(round_at):
raise ValueError("rounding precision '%s' is not a valid real number." % (ascii(round_at),))
precision = int(Decimal(str(round_at)))
if (precision < 0):
precision = 9
if alpha:
if not isNumber(alpha):
raise ValueError("alpha '%s' is not a valid real number between 0 and 1." % (ascii(alpha),))
alpha = float(Decimal(str(alpha)))
if (alpha < 0) or (alpha > 1):
alpha = 0.05
else:
alpha = 0.05
# check that both data series have same amount of element
count1 = float(dataframe[variable1].shape[0])
count2 = float(dataframe[variable2].shape[0])
if not (count1 == count2):
raise AssertionError("numeric series do not contain the same amount of observations.")
# calculate degrees of freedom
df = chi2_df(variable1, variable2, dataframe)
# get name of groups for variable 1 (X)
head_rows = list()
v1uq = uniquevals(dataframe[variable1])
for k1, v1 in v1uq.items():
head_rows.append(str(k1))
head_rows = sorted(head_rows)
rows = len(v1uq)
# get name of groups for variable 2 (Y)
head_cols = list()
v2uq = uniquevals(dataframe[variable2])
for k2, v2 in v2uq.items():
head_cols.append(str(k2))
head_cols = sorted(head_cols)
columns = len(v2uq)
if (rows < 2) or (columns < 2):
raise AssertionError("Cannot compute chi squared for table smaller than 2x2")
# create associations between variable 1 (X) and variable 2 (Y)
# variable 1 should be EXPECTED VARIABLE
# variable 2 should be REFERENCE VARIABLE
pairs = dict()
for k1, v1 in v1uq.items():
for k2, v2 in v2uq.items():
pair = "%s,%s" % (str(k1), str(k2))
# set each cross-association to value count zero
pairs[pair] = 0
# calculate value counts for each association
for index, row in dataframe.iterrows():
valvar1 = row[variable1]
valvar2 = row[variable2]
pair = "%s,%s" % (str(valvar1), str(valvar2))
# increment each cross-association
pairs[pair] = pairs.get(pair, 0) + 1
    # create dictionary to store cross tab relations between variables
tabrows = OrderedDict()
# for each variable group of X
for group1 in head_rows:
# create a dictionary element with that name and assign a list to it
tabrows[group1] = list()
# for each variable group in Y
for group2 in head_cols:
            # create variable association string (X,Y)
hpair = "%s,%s" % (str(group1), str(group2))
            # check that the association is in the pre-calculated association list
if (hpair in pairs):
# get the value for that association
valcount = pairs[hpair]
# append value to the list
tabrows[group1].append(valcount)
            # if not in the pre-calculated association list
else:
                # raise error to alert of an internal inconsistency
raise ValueError("Unexpected: pre computed crosstab relation '%s' not present." % (hpair,))
tabrows[group1].append(v1uq[group1])
# create list with all column total values
tabrows["Total"] = list()
alltot = 0
for group2 in head_cols:
tabrows["Total"].append(v2uq[group2])
alltot = alltot + v2uq[group2]
tabrows["Total"].append(alltot)
# copy dictionary so we can work on the copy to add element
temprows = tabrows.copy()
# calculate expected counts
excount = 0
# lists for internal calculations (max precision, no rounding)
list_allobserved = list()
list_allexp = list()
list_allchi2 = list()
list_allchi2_yates = list()
list_alllikelihood = list()
# number of total elements
totalelem = tabrows["Total"][-1]
# create cross tabulation to check each value and check calculated values
for k, v in tabrows.items():
if (k == "Total"):
pass
else:
# list for expected count
head_exp = k + " (expected)"
list_expected = list()
sum_expected = Decimal("0")
# list for residuals
head_res = k + " (residual)"
list_residual = list()
sum_residuals = Decimal("0")
# list for standardised residuals
head_stdres = k + " (std. residual)"
list_stdresidual = list()
head_stdres_yates = k + " (std. residual, Yates)"
list_stdresidual_yates = list()
# list for adjusted residuals
head_adjresiduals = k + " (adj. residual)"
list_adjresiduals = list()
# list for percentage deviation
head_pdeviation = k + " (% deviation)"
list_pdeviation = list()
head_pdeviation_yates = k + " (% deviation, Yates)"
list_pdeviation_yates = list()
# list for chi 2 cell contribution
head_chi2contrib = k + " (chi2 contrib.)"
list_chi2contrib = list()
head_chi2contrib_yates = k + " (chi2 contrib., Yates)"
list_chi2contrib_yates = list()
# Likelihood ratio
head_liker = k + " (likelihood ratio)"
list_likelihood_ratio = list()
# set step counter to zero
excount = 0
# parse each count
for item in v:
# set each count to Decimal type
item = Decimal(item)
# check if we finished the element counts
if (excount == columns):
# skip last column that has marginal totals
pass
# start parsing and counting
else:
# calculate expected value
total_row = Decimal(str(v[-1]))
total_col = Decimal(str(tabrows["Total"][excount]))
total_tab = Decimal(str(totalelem))
list_allobserved.append(float(item))
# calculate expected value
val_exp = ((total_col * total_row) / total_tab)
list_allexp.append(float(val_exp))
if round_at:
val_expr = round(val_exp, precision)
list_expected.append(float(val_expr))
sum_expected = sum_expected + val_exp
# check for possible division by zero errors
if (val_exp == Decimal(0)):
list_pdeviation.append("Division by zero")
list_pdeviation_yates.append("Division by zero")
list_stdresidual.append("Division by zero")
list_stdresidual_yates.append("Division by zero")
list_adjresiduals.append("Division by zero")
list_chi2contrib.append("Division by zero")
list_chi2contrib_yates.append("Division by zero")
list_likelihood_ratio.append("Division by zero")
# safety, this WILL generate an internal error
# as we mix string and floats TO BE SURE we spot this
list_allchi2.append("Division by zero")
list_allchi2_yates.append("Division by zero")
list_alllikelihood.append("Division by zero")
else:
# calculate residual
# (Observed - Expected)
val_res = (item - Decimal(str(val_exp)))
if round_at:
val_res = round(val_res, precision)
list_residual.append(float(val_res))
sum_residuals = sum_residuals + val_res
# calculate percentage deviation
val_pdev = chi2_pdev(item, val_exp, round_at=precision)
list_pdeviation.append(val_pdev)
# calculate percentage deviation - YATES correction
val_pdevy = chi2_pdev_yates(item, val_exp, round_at=precision)
list_pdeviation_yates.append(val_pdevy)
# calculate standardised (Pearson) residual
val_stdres = chi2_stdres(item, val_exp, round_at=precision)
list_stdresidual.append(val_stdres)
# calculate standardised (Pearson) residual - YATES correction
val_stdresy = chi2_stdres_yates(item, val_exp, round_at=precision)
list_stdresidual_yates.append(val_stdresy)
# calculate adjusted residual
val_adjres = chi2_adjusted_residual(item, val_exp, total_row, total_col, total_tab, round_at=precision)
list_adjresiduals.append(val_adjres)
# ELEMENTS FOR LATER CALCULATIONS - NEED TO STAY NOT ROUNDED
# calculate chi square contribution
chi2contrib = chi2_contrib(item, val_exp)
list_allchi2.append(chi2contrib)
# calculate chi square contribution - YATES correction
chi2contriby = chi2_contrib_yates(item, val_exp)
list_allchi2_yates.append(chi2contriby)
# calculate likelihood ratio (G) contribution
likelihoodr = (2 * likelihood_ratio_contrib(item, val_exp))
list_alllikelihood.append(likelihoodr)
# for each list of data to use in tabular visualization
if round_at:
# add rounded elements to lists
list_chi2contrib.append(round(chi2contrib, precision))
list_chi2contrib_yates.append(round(chi2contriby, precision))
list_likelihood_ratio.append(round(likelihoodr, precision))
else:
# add raw value to the list
list_chi2contrib.append(chi2contrib)
list_chi2contrib_yates.append(chi2contriby)
list_likelihood_ratio.append(likelihoodr)
# increment step counter
excount = excount + 1
# clean all intermediate variables at the end of each pass
del(total_row)
del(total_col)
del(total_tab)
del(val_exp)
del(val_res)
del(val_pdev)
del(val_pdevy)
del(val_stdres)
del(val_stdresy)
del(val_adjres)
del(chi2contrib)
del(chi2contriby)
del(likelihoodr)
# check sum of expected counts and add to list
sum_expected = int(round(sum_expected, 0))
if not (sum_expected == v1uq[k]):
raise AssertionError("Unexpected: sum of expected counts for '%s' should be '%s', got '%s'." % (k, v1uq[k], sum_expected))
else:
list_expected.append(sum_expected)
temprows[head_exp] = list_expected
del(list_expected)
# check sum of residual values and add to list
sum_residuals = int(round(sum_residuals, 0))
if not (sum_residuals == 0):
raise AssertionError("Unexpected: sum of residuals for '%s' should be zero, got '%s'." % (k, sum_residuals,))
else:
sum_residuals = abs(sum_residuals)
list_residual.append(sum_residuals)
temprows[head_res] = list_residual
del(list_residual)
# add list of percentage deviations to reference dictionary
temprows[head_pdeviation] = list_pdeviation
temprows[head_pdeviation_yates] = list_pdeviation_yates
del(list_pdeviation)
del(list_pdeviation_yates)
# add list of standardised residuals to reference dictionary
temprows[head_stdres] = list_stdresidual
temprows[head_stdres_yates] = list_stdresidual_yates
del(list_stdresidual)
del(list_stdresidual_yates)
# add list of adjusted residuals to reference dictionary
temprows[head_adjresiduals] = list_adjresiduals
del(list_adjresiduals)
# add list of chi2 cell contributions to reference dictionary
temprows[head_chi2contrib] = list_chi2contrib
temprows[head_chi2contrib_yates] = list_chi2contrib_yates
del(list_chi2contrib)
del(list_chi2contrib_yates)
            # add list of likelihood ratios to reference dictionary
temprows[head_liker] = list_likelihood_ratio
del(list_likelihood_ratio)
# reset step counter
excount = 0
###########################################################################
# prepare tables to print
# create header line
freq_headers = [""]
for item in head_cols:
freq_headers.append(item)
freq_headers.append("Total")
# create value table
table_fit = list()
for k, v in temprows.items():
printrow = list()
printrow.append(k)
for value in v:
printrow.append(value)
# row name + values + total
if (len(printrow) == (len(head_cols) + 2)):
pass
# row name + values
elif (len(printrow) == (len(head_cols) + 1)):
printrow.append("-")
else:
raise AssertionError("Elements in list does not match expected number of columns.")
table_fit.append(printrow)
# create list to store all values
table_all = list()
# create list to store only observation counts
table_data = list()
for group in head_rows:
for line in table_fit:
line_name = str(line[0])
if (line_name == group):
table_data.append(line)
if (line_name.startswith(group)):
if ("expected" in line_name):
table_data.append(line)
table_all.append(line)
for line in table_fit:
line_name = str(line[0])
if (line_name.startswith("Total")):
table_data.append(line)
table_all.append(line)
# create tabulate
print_table_data = str(tabulate(table_data, headers=freq_headers
, tablefmt='pipe'
, stralign='left'
, numalign='left'))
# create tabulate
print_table_all = str(tabulate(table_all, headers=freq_headers
, tablefmt='pipe'
, stralign='left'
, numalign='left'))
###########################################################################
# Check for data issues, if found print data table and log, then exit
status, statuslog = chi2_expected_check(rows, columns, list_allexp)
if (status == "FAIL"):
print("Chi Squared - Contingency Table\n")
print(print_table_data)
print()
for line in statuslog:
print(line)
print("Quitting!\n")
print()
sys.exit(1)
else:
if (verbose == True):
for line in statuslog:
print(line)
###########################################################################
# perform all calculation
# total number of cells
tot_cells = (rows * columns)
# minimum expected value
if round_at:
expected_min = round(min(list_allexp), precision)
else:
expected_min = min(list_allexp)
# minimum observed value
observed_min = min(list_allobserved)
# calculate chi value - sum of all contributors - using RAW value list
chi2 = sum(list_allchi2)
# calculate chi value std dev
chi2_stdev = np.array(list_allchi2).std()
# calculate chi value - yates correction - using RAW value list
chi2_yates = sum(list_allchi2_yates)
# calculate chi value std dev - yates correction
chi2_stdev_yates = np.array(list_allchi2_yates).std()
# probability value
p_val = stats.chi2.sf(chi2, df)
# probability value - YATES
p_val_yates = stats.chi2.sf(chi2_yates, df)
# phi coefficient
phi_coeff = phi_coefficient(chi2, totalelem)
# contingency coefficient C
obs_cont_coeff = contingency_coeff(chi2, totalelem)
if (rows < columns):
# corrected contingency coefficient
obs_cont_coeff_corr = contingency_coeff_corr(chi2, totalelem, rows)
else:
# corrected contingency coefficient
obs_cont_coeff_corr = contingency_coeff_corr(chi2, totalelem, columns)
if (rows == columns):
cont_coeff_std = standardized_contingency_coeff(obs_cont_coeff, rows, columns)
cont_coeff_std_corr = standardized_contingency_coeff(obs_cont_coeff_corr, rows, columns)
# cramer's V
if (rows < columns):
cramerV = cramer_V(chi2, totalelem, rows)
else:
cramerV = cramer_V(chi2, totalelem, columns)
###########################################################################
# Interpret results
# chi squared - interpretation
# http://www.itl.nist.gov/div898/handbook/eda/section3/eda3674.htm
# upper tail, one sided - we use alpha
chi2_ut_1s = alpha
# calculate chi 2 critical value
chi2_CV_ut_1s = stats.chi2.isf(chi2_ut_1s, df)
# check if we can accept or reject null hypothesis - chi squared
if (chi2 > chi2_CV_ut_1s):
chi2_iterp_ut_1s = "Rejected"
else:
chi2_iterp_ut_1s = "Accepted"
# check if we can accept or reject null hypothesis - chi squared - YATES
if (chi2_yates > chi2_CV_ut_1s):
chi2_iterp_ut_1s_yates = "Rejected"
else:
chi2_iterp_ut_1s_yates = "Accepted"
# lower tail, one sided - we use abs(alpha-1)
chi2_lt_1s = abs(alpha-1)
    # calculate chi 2 critical value
chi2_CV_lt_1s = stats.chi2.isf(chi2_lt_1s, df)
# check if we can accept or reject null hypothesis - chi squared
if (chi2 < chi2_CV_lt_1s):
chi2_iterp_lt_1s = "Rejected"
else:
chi2_iterp_lt_1s = "Accepted"
# check if we can accept or reject null hypothesis - chi squared - YATES
if (chi2_yates < chi2_CV_lt_1s):
chi2_iterp_lt_1s_yates = "Rejected"
else:
chi2_iterp_lt_1s_yates = "Accepted"
# two sided - we use (alpha/2) for upper tail
chi2_ut_2s = (alpha/2)
# two sided - we use (abs(1-(alpha/2))) for lower tail
chi2_lt_2s = (abs(1-(alpha/2)))
    # calculate chi 2 critical values
chi2_CV_ut_2s = stats.chi2.isf(chi2_ut_2s, df)
chi2_CV_lt_2s = stats.chi2.isf(chi2_lt_2s, df)
# check if we can accept or reject null hypothesis - chi squared
if (chi2 < chi2_CV_lt_2s) or (chi2 > chi2_CV_ut_2s):
chi2_iterp_2s = "Rejected"
else:
chi2_iterp_2s = "Accepted"
# check if we can accept or reject null hypothesis - chi squared - YATES
    if (chi2_yates < chi2_CV_lt_2s) or (chi2_yates > chi2_CV_ut_2s):
chi2_iterp_2s_yates = "Rejected"
else:
chi2_iterp_2s_yates = "Accepted"
# Likelihood ratio (G-test) - using RAW value list
likelihood_ratio = (sum(list_alllikelihood))
# lower tail, one sided
if (likelihood_ratio < chi2_CV_lt_1s):
likelihood_ratio_iterp_lt_1s = "Rejected"
else:
likelihood_ratio_iterp_lt_1s = "Accepted"
# upper tail, one sided
if (likelihood_ratio > chi2_CV_ut_1s):
likelihood_ratio_iterp_ut_1s = "Rejected"
else:
likelihood_ratio_iterp_ut_1s = "Accepted"
# two sided - we use (alpha/2) for upper tail
if (likelihood_ratio < chi2_CV_lt_2s) or (likelihood_ratio > chi2_CV_ut_2s):
likelihood_ratio_iterp_2s = "Rejected"
else:
likelihood_ratio_iterp_2s = "Accepted"
###########################################################################
# add all results to list for later printing
all_details = list()
all_details.append("\n===========================")
all_details.append("Contingency Table")
all_details.append("Table Size: '%sx%s'" % (rows, columns))
all_details.append("Number of cells: '%s'" % (tot_cells,))
all_details.append("Total number of elements: '%s'" % (totalelem,))
all_details.append("Observed minimum value: '%s'" % (observed_min,))
all_details.append("Expected minimum value: '%s'" % (expected_min,))
all_details.append("\nChi Squared")
all_details.append("Pearson chi2: '%s'" % (chi2,))
all_details.append("Pearson chi2 (std. dev): '%s'" % (chi2_stdev,))
all_details.append("Degrees of freedom (df): '%s'" % (df,))
all_details.append("p-value (Pearson chi2): '%s'" % (p_val,))
all_details.append("Critical value, Lower tail, one-sided (alpha=%s, df=%s): %s" % (alpha, df, chi2_CV_lt_1s))
all_details.append("Critical value, Upper tail, one-sided (alpha=%s, df=%s): %s" % (alpha, df, chi2_CV_ut_1s))
all_details.append("Critical value, two-sided (alpha=%s, df=%s, value: %s,%s" % (chi2_ut_2s, df, chi2_CV_lt_2s, chi2_CV_ut_2s))
all_details.append("Pearson chi2, Null hypothesis, Lower tail, one-sided: '%s'" % (chi2_iterp_lt_1s,))
all_details.append("Pearson chi2, Null hypothesis, Upper tail, one-sided: '%s'" % (chi2_iterp_ut_1s,))
all_details.append("Pearson chi2, Null hypothesis, Two-sided: '%s'" % (chi2_iterp_2s,))
all_details.append("\nChi Squared - Yates Continuity Corrections")
all_details.append("Yates chi2: '%s'" % (chi2_yates,))
all_details.append("Yates chi2 (std. dev): '%s'" % (chi2_stdev_yates,))
all_details.append("Degrees of freedom (df): '%s'" % (df,))
all_details.append("p-value (Yates chi2): '%s'" % (p_val_yates,))
all_details.append("Critical value, Lower tail, one-sided (alpha=%s, df=%s): %s" % (alpha, df, chi2_CV_lt_1s))
all_details.append("Critical value, Upper tail, one-sided (alpha=%s, df=%s): %s" % (alpha, df, chi2_CV_ut_1s))
all_details.append("Critical value, two-sided (alpha=%s, df=%s, value: %s,%s" % (chi2_ut_2s, df, chi2_CV_lt_2s, chi2_CV_ut_2s))
all_details.append("Yates chi2, Null hypothesis, Lower tail, one-sided: '%s'" % (chi2_iterp_lt_1s_yates,))
all_details.append("Yates chi2, Null hypothesis, Upper tail, one-sided: '%s'" % (chi2_iterp_ut_1s_yates,))
all_details.append("Yates chi2, Null hypothesis, Two-tailed: '%s'" % (chi2_iterp_2s_yates,))
all_details.append("\nChi Squared - Log-Likelihood ratio")
all_details.append("Log-Likelihood ratio (G-test): '%s'" % (likelihood_ratio,))
all_details.append("Critical value, Lower tail, one-sided (alpha=%s, df=%s): %s" % (alpha, df, chi2_CV_lt_1s))
all_details.append("Critical value, Upper tail, one-sided (alpha=%s, df=%s): %s" % (alpha, df, chi2_CV_ut_1s))
all_details.append("Critical value, two-sided (alpha=%s, df=%s, value: %s,%s" % (chi2_ut_2s, df, chi2_CV_lt_2s, chi2_CV_ut_2s))
all_details.append("G-test, Null hypothesis, Lower tail, one-sided: '%s'" % (likelihood_ratio_iterp_lt_1s,))
all_details.append("G-test, Null hypothesis, Upper tail, one-sided: '%s'" % (likelihood_ratio_iterp_ut_1s,))
all_details.append("G-test, Null hypothesis, Two-tailed: '%s'" % (likelihood_ratio_iterp_2s,))
if (rows == columns):
all_details.append("\nContingency coefficient")
all_details.append("Observed contingency coefficient (C): '%s'" % (obs_cont_coeff,))
all_details.append("Observed contingency coefficient corrected (C corr): '%s'" % (obs_cont_coeff_corr,))
all_details.append("Standardized contingency coefficient (C std): '%s'" % (cont_coeff_std,))
all_details.append("Standardized contingency coefficient corrected (C corr std): '%s'" % (cont_coeff_std_corr,))
all_details.append("\nMeasures of Associations")
if (rows == 2) and (columns == 2):
all_details.append("Phi coefficient: '%s'" % (phi_coeff,))
all_details.append("Cramer's V (V): '%s'" % (cramerV,))
all_details.append("===========================\n")
if (verbose == True):
# print full table with all calculations
print(print_table_all)
# print all calculation with details
for line in all_details:
print(line)
return (chi2, p_val, df, chi2_iterp_2s)
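# Illustrative usage sketch (column names are assumptions, not taken from the
# original analysis): chi2_crosstab expects two categorical columns of a pandas
# DataFrame and returns the Pearson chi2, its p-value, the degrees of freedom
# and the two-sided null-hypothesis decision, e.g.
#   chi2, p_val, df, decision = chi2_crosstab("HEMISPHERE", "QUADRANGLE", data,
#                                             alpha=0.05, round_at=4, verbose=True)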
def generalizedESD(x, maxOLs, alpha=0.05, fullOutput=False):
# SOURCE:
# PyAstronomy/src/pyasl/asl/outlier.py
# https://github.com/sczesla/PyAstronomy
"""
Carry out a Generalized ESD Test for Outliers.
The Generalized Extreme Studentized Deviate (ESD) test for
outliers can be used to search for outliers in a univariate
data set, which approximately follows a normal distribution.
A description of the algorithm is, e.g., given at
`Nist <http://www.itl.nist.gov/div898/handbook/eda/section3/eda35h3.htm>`_
or [Rosner1983]_.
Parameters
----------
maxOLs : int
Maximum number of outliers in the data set.
alpha : float, optional
Significance (default is 0.05).
fullOutput : boolean, optional
Determines whether additional return values
are provided. Default is False.
Returns
-------
Number of outliers : int
The number of data points characterized as
outliers by the test.
Indices : list of ints
The indices of the data points found to
be outliers.
R : list of floats, optional
The values of the "R statistics". Only provided
if `fullOutput` is set to True.
L : list of floats, optional
The lambda values needed to test whether a point
should be regarded an outlier. Only provided
if `fullOutput` is set to True.
"""
from scipy.stats import t
if maxOLs < 1:
raise ValueError("Maximum number of outliers, `maxOLs`, must be > 1.")
import numpy.ma as ma
xm = ma.array(x)
n = len(xm)
# Compute R-values
R = []
L = []
minds = []
for i in range(maxOLs + 1):
# Compute mean and std of x
xmean = xm.mean()
xstd = xm.std()
# Find maximum deviation
rr = np.abs((xm - xmean)/xstd)
minds.append(np.argmax(rr))
R.append(rr[minds[-1]])
if i >= 1:
p = 1.0 - alpha/(2.0*(n - i + 1))
perPoint = t.ppf(p, n-i-1)
L.append((n-i)*perPoint / np.sqrt((n-i-1+perPoint**2) * (n-i+1)))
# Mask that value and proceed
xm[minds[-1]] = ma.masked
# Remove the first entry from R, which is of
# no meaning for the test
R.pop(-1)
# Find the number of outliers
ofound = False
for i in range(maxOLs-1, -1, -1):
if R[i] > L[i]:
ofound = True
break
# Prepare return value
if ofound:
if not fullOutput:
# There are outliers
return i+1, minds[0:i+1]
else:
return i+1, minds[0:i+1], R, L, minds
else:
# No outliers could be detected
if not fullOutput:
            # No outliers were detected
return 0, []
else:
return 0, [], R, L, minds
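# Illustrative usage sketch (sample data assumed): generalizedESD flags up to
# maxOLs outliers in an approximately normal sample, e.g.
#   x = np.concatenate([np.random.normal(0, 1, 100), np.array([8.0, -9.0])])
#   n_outliers, indices = generalizedESD(x, maxOLs=5, alpha=0.05)
# where n_outliers is the number of detected outliers and indices their
# positions in x.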
###############################################################################
dataset = 'marscrater_clean.csv'
data = pd.read_csv(dataset)
# setting variables to CATEGORICAL
data["CRATER_ID"] = data["CRATER_ID"].astype('category')
data["CRATER_NAME"] = data["CRATER_NAME"].astype('category')
data["MORPHOLOGY_EJECTA_1"] = data["MORPHOLOGY_EJECTA_1"].astype('category')
data["MORPHOLOGY_EJECTA_2"] = data["MORPHOLOGY_EJECTA_2"].astype('category')
data["MORPHOLOGY_EJECTA_3"] = data["MORPHOLOGY_EJECTA_3"].astype('category')
# setting variables to NUMERIC - FLOAT
data["LATITUDE_CIRCLE_IMAGE"] = data["LATITUDE_CIRCLE_IMAGE"].astype('float64')
data["LONGITUDE_CIRCLE_IMAGE"] = data["LONGITUDE_CIRCLE_IMAGE"].astype('float64')
data["DIAM_CIRCLE_IMAGE"] = data["DIAM_CIRCLE_IMAGE"].astype('float64')
data["DEPTH_RIMFLOOR_TOPOG"] = data["DEPTH_RIMFLOOR_TOPOG"].astype('float64')
# setting variables to NUMERIC - INT
data["NUMBER_LAYERS"] = data["NUMBER_LAYERS"].astype('int')
###############################################################################
# add new array to dataframe
data = pd.concat([data, pd.DataFrame(columns=['LONGITUDE_EAST_360'])], ignore_index=True)
data = pd.concat([data, pd.DataFrame(columns=['QUADRANGLE'])], ignore_index=True)
data = pd.concat([data, pd.DataFrame(columns=['HEMISPHERE'])], ignore_index=True)
data = pd.concat([data, pd.DataFrame(columns=['LAYERS'])], ignore_index=True)
data = pd.concat([data, pd.DataFrame(columns=['LATITUDE_SOUTH_180'])], ignore_index=True)
# Copyright (c) 2018-2021, NVIDIA CORPORATION.
import array as arr
import datetime
import io
import operator
import random
import re
import string
import textwrap
from copy import copy
import cupy
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from numba import cuda
import cudf
from cudf.core._compat import PANDAS_GE_110, PANDAS_GE_120
from cudf.core.column import column
from cudf.tests import utils
from cudf.tests.utils import (
ALL_TYPES,
DATETIME_TYPES,
NUMERIC_TYPES,
assert_eq,
assert_exceptions_equal,
does_not_raise,
gen_rand,
)
def test_init_via_list_of_tuples():
data = [
(5, "cats", "jump", np.nan),
(2, "dogs", "dig", 7.5),
(3, "cows", "moo", -2.1, "occasionally"),
]
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def _dataframe_na_data():
return [
pd.DataFrame(
{
"a": [0, 1, 2, np.nan, 4, None, 6],
"b": [np.nan, None, "u", "h", "d", "a", "m"],
},
index=["q", "w", "e", "r", "t", "y", "u"],
),
pd.DataFrame({"a": [0, 1, 2, 3, 4], "b": ["a", "b", "u", "h", "d"]}),
pd.DataFrame(
{
"a": [None, None, np.nan, None],
"b": [np.nan, None, np.nan, None],
}
),
pd.DataFrame({"a": []}),
pd.DataFrame({"a": [np.nan], "b": [None]}),
pd.DataFrame({"a": ["a", "b", "c", None, "e"]}),
pd.DataFrame({"a": ["a", "b", "c", "d", "e"]}),
]
@pytest.mark.parametrize("rows", [0, 1, 2, 100])
def test_init_via_list_of_empty_tuples(rows):
data = [()] * rows
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(
pdf,
gdf,
check_like=True,
check_column_type=False,
check_index_type=False,
)
@pytest.mark.parametrize(
"dict_of_series",
[
{"a": pd.Series([1.0, 2.0, 3.0])},
{"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 4.0], index=[1, 2, 3]),
},
{"a": [1, 2, 3], "b": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=["a", "b", "c"]),
"b": pd.Series([1.0, 2.0, 4.0], index=["c", "d", "e"]),
},
{
"a": pd.Series(
["a", "b", "c"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
"b": pd.Series(
["a", " b", "d"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
},
],
)
def test_init_from_series_align(dict_of_series):
pdf = pd.DataFrame(dict_of_series)
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
for key in dict_of_series:
if isinstance(dict_of_series[key], pd.Series):
dict_of_series[key] = cudf.Series(dict_of_series[key])
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
("dict_of_series", "expectation"),
[
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 5, 6]),
},
pytest.raises(
ValueError, match="Cannot align indices with non-unique values"
),
),
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
},
does_not_raise(),
),
],
)
def test_init_from_series_align_nonunique(dict_of_series, expectation):
with expectation:
gdf = cudf.DataFrame(dict_of_series)
if expectation == does_not_raise():
pdf = pd.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
def test_init_unaligned_with_index():
pdf = pd.DataFrame(
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
gdf = cudf.DataFrame(
{
"a": cudf.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": cudf.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
assert_eq(pdf, gdf, check_dtype=False)
def test_series_basic():
# Make series from buffer
a1 = np.arange(10, dtype=np.float64)
series = cudf.Series(a1)
assert len(series) == 10
np.testing.assert_equal(series.to_array(), np.hstack([a1]))
def test_series_from_cupy_scalars():
data = [0.1, 0.2, 0.3]
data_np = np.array(data)
data_cp = cupy.array(data)
s_np = cudf.Series([data_np[0], data_np[2]])
s_cp = cudf.Series([data_cp[0], data_cp[2]])
assert_eq(s_np, s_cp)
@pytest.mark.parametrize("a", [[1, 2, 3], [1, 10, 30]])
@pytest.mark.parametrize("b", [[4, 5, 6], [-11, -100, 30]])
def test_append_index(a, b):
df = pd.DataFrame()
df["a"] = a
df["b"] = b
gdf = cudf.DataFrame()
gdf["a"] = a
gdf["b"] = b
# Check the default index after appending two columns(Series)
expected = df.a.append(df.b)
actual = gdf.a.append(gdf.b)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
expected = df.a.append(df.b, ignore_index=True)
actual = gdf.a.append(gdf.b, ignore_index=True)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
def test_series_init_none():
# test for creating empty series
# 1: without initializing
sr1 = cudf.Series()
got = sr1.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
# 2: Using `None` as an initializer
sr2 = cudf.Series(None)
got = sr2.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_basic():
np.random.seed(0)
df = cudf.DataFrame()
# Populate with cuda memory
df["keys"] = np.arange(10, dtype=np.float64)
np.testing.assert_equal(df["keys"].to_array(), np.arange(10))
assert len(df) == 10
# Populate with numpy array
rnd_vals = np.random.random(10)
df["vals"] = rnd_vals
np.testing.assert_equal(df["vals"].to_array(), rnd_vals)
assert len(df) == 10
assert tuple(df.columns) == ("keys", "vals")
# Make another dataframe
df2 = cudf.DataFrame()
df2["keys"] = np.array([123], dtype=np.float64)
df2["vals"] = np.array([321], dtype=np.float64)
# Concat
df = cudf.concat([df, df2])
assert len(df) == 11
hkeys = np.asarray(np.arange(10, dtype=np.float64).tolist() + [123])
hvals = np.asarray(rnd_vals.tolist() + [321])
np.testing.assert_equal(df["keys"].to_array(), hkeys)
np.testing.assert_equal(df["vals"].to_array(), hvals)
# As matrix
mat = df.as_matrix()
expect = np.vstack([hkeys, hvals]).T
np.testing.assert_equal(mat, expect)
# test dataframe with tuple name
df_tup = cudf.DataFrame()
data = np.arange(10)
df_tup[(1, "foobar")] = data
np.testing.assert_equal(data, df_tup[(1, "foobar")].to_array())
df = cudf.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
pdf = pd.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
assert_eq(df, pdf)
gdf = cudf.DataFrame({"id": [0, 1], "val": [None, None]})
gdf["val"] = gdf["val"].astype("int")
assert gdf["val"].isnull().all()
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"columns", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_columns(pdf, columns, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(columns=columns, inplace=inplace)
actual = gdf.drop(columns=columns, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_0(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=0, inplace=inplace)
actual = gdf.drop(labels=labels, axis=0, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"index",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_index(pdf, index, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace)
actual = gdf.drop(index=index, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5},
index=pd.MultiIndex(
levels=[
["lama", "cow", "falcon"],
["speed", "weight", "length"],
],
codes=[
[0, 0, 0, 1, 1, 1, 2, 2, 2, 1],
[0, 1, 2, 0, 1, 2, 0, 1, 2, 1],
],
),
)
],
)
@pytest.mark.parametrize(
"index,level",
[
("cow", 0),
("lama", 0),
("falcon", 0),
("speed", 1),
("weight", 1),
("length", 1),
pytest.param(
"cow",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"lama",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"falcon",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_multiindex(pdf, index, level, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace, level=level)
actual = gdf.drop(index=index, inplace=inplace, level=level)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_1(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=1, inplace=inplace)
actual = gdf.drop(labels=labels, axis=1, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
def test_dataframe_drop_error():
df = cudf.DataFrame({"a": [1], "b": [2], "c": [3]})
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "d"}),
rfunc_args_and_kwargs=([], {"columns": "d"}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
rfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
rfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
expected_error_message="Cannot specify both",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"axis": 1}),
rfunc_args_and_kwargs=([], {"axis": 1}),
expected_error_message="Need to specify at least",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([[2, 0]],),
rfunc_args_and_kwargs=([[2, 0]],),
expected_error_message="One or more values not found in axis",
)
def test_dataframe_drop_raises():
df = cudf.DataFrame(
{"a": [1, 2, 3], "c": [10, 20, 30]}, index=["x", "y", "z"]
)
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["p"],),
rfunc_args_and_kwargs=(["p"],),
expected_error_message="One or more values not found in axis",
)
# label dtype mismatch
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([3],),
rfunc_args_and_kwargs=([3],),
expected_error_message="One or more values not found in axis",
)
expect = pdf.drop("p", errors="ignore")
actual = df.drop("p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "p"}),
rfunc_args_and_kwargs=([], {"columns": "p"}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(columns="p", errors="ignore")
actual = df.drop(columns="p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
rfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(labels="p", axis=1, errors="ignore")
actual = df.drop(labels="p", axis=1, errors="ignore")
assert_eq(actual, expect)
def test_dataframe_column_add_drop_via_setitem():
df = cudf.DataFrame()
data = np.asarray(range(10))
df["a"] = data
df["b"] = data
assert tuple(df.columns) == ("a", "b")
del df["a"]
assert tuple(df.columns) == ("b",)
df["c"] = data
assert tuple(df.columns) == ("b", "c")
df["a"] = data
assert tuple(df.columns) == ("b", "c", "a")
def test_dataframe_column_set_via_attr():
data_0 = np.asarray([0, 2, 4, 5])
data_1 = np.asarray([1, 4, 2, 3])
data_2 = np.asarray([2, 0, 3, 0])
df = cudf.DataFrame({"a": data_0, "b": data_1, "c": data_2})
for i in range(10):
df.c = df.a
assert assert_eq(df.c, df.a, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
df.c = df.b
assert assert_eq(df.c, df.b, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
def test_dataframe_column_drop_via_attr():
df = cudf.DataFrame({"a": []})
with pytest.raises(AttributeError):
del df.a
assert tuple(df.columns) == tuple("a")
@pytest.mark.parametrize("axis", [0, "index"])
def test_dataframe_index_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper={1: 5, 2: 6}, axis=axis)
got = gdf.rename(mapper={1: 5, 2: 6}, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(index={1: 5, 2: 6})
got = gdf.rename(index={1: 5, 2: 6})
assert_eq(expect, got)
expect = pdf.rename({1: 5, 2: 6})
got = gdf.rename({1: 5, 2: 6})
assert_eq(expect, got)
# pandas supports indexes with mixed value types; cudf raises a
# `NotImplementedError` for this case.
with pytest.raises(NotImplementedError):
gdf.rename(mapper={1: "x", 2: "y"}, axis=axis)
def test_dataframe_MI_rename():
gdf = cudf.DataFrame(
{"a": np.arange(10), "b": np.arange(10), "c": np.arange(10)}
)
gdg = gdf.groupby(["a", "b"]).count()
pdg = gdg.to_pandas()
expect = pdg.rename(mapper={1: 5, 2: 6}, axis=0)
got = gdg.rename(mapper={1: 5, 2: 6}, axis=0)
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [1, "columns"])
def test_dataframe_column_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper=lambda name: 2 * name, axis=axis)
got = gdf.rename(mapper=lambda name: 2 * name, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(columns=lambda name: 2 * name)
got = gdf.rename(columns=lambda name: 2 * name)
assert_eq(expect, got)
rename_mapper = {"a": "z", "b": "y", "c": "x"}
expect = pdf.rename(columns=rename_mapper)
got = gdf.rename(columns=rename_mapper)
assert_eq(expect, got)
def test_dataframe_pop():
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": ["x", "y", "z"], "c": [7.0, 8.0, 9.0]}
)
gdf = cudf.DataFrame.from_pandas(pdf)
# Test non-existing column error
with pytest.raises(KeyError) as raises:
gdf.pop("fake_colname")
raises.match("fake_colname")
# check pop numeric column
pdf_pop = pdf.pop("a")
gdf_pop = gdf.pop("a")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check string column
pdf_pop = pdf.pop("b")
gdf_pop = gdf.pop("b")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check float column and empty dataframe
pdf_pop = pdf.pop("c")
gdf_pop = gdf.pop("c")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check empty dataframe edge case
empty_pdf = pd.DataFrame(columns=["a", "b"])
empty_gdf = cudf.DataFrame(columns=["a", "b"])
pb = empty_pdf.pop("b")
gb = empty_gdf.pop("b")
assert len(pb) == len(gb)
assert empty_pdf.empty and empty_gdf.empty
@pytest.mark.parametrize("nelem", [0, 3, 100, 1000])
def test_dataframe_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df["a"].dtype is np.dtype(np.int32)
df["b"] = df["a"].astype(np.float32)
assert df["b"].dtype is np.dtype(np.float32)
np.testing.assert_equal(df["a"].to_array(), df["b"].to_array())
@pytest.mark.parametrize("nelem", [0, 100])
def test_index_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df.index.dtype is np.dtype(np.int64)
df.index = df.index.astype(np.float32)
assert df.index.dtype is np.dtype(np.float32)
df["a"] = df["a"].astype(np.float32)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
df["b"] = df["a"]
df = df.set_index("b")
df["a"] = df["a"].astype(np.int16)
df.index = df.index.astype(np.int16)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
def test_dataframe_to_string():
pd.options.display.max_rows = 5
pd.options.display.max_columns = 8
# Test basic
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
string = str(df)
assert string.splitlines()[-1] == "[6 rows x 2 columns]"
# Test skipped columns
df = cudf.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6],
"b": [11, 12, 13, 14, 15, 16],
"c": [11, 12, 13, 14, 15, 16],
"d": [11, 12, 13, 14, 15, 16],
}
)
string = df.to_string()
assert string.splitlines()[-1] == "[6 rows x 4 columns]"
# Test masked
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
data = np.arange(6)
mask = np.zeros(1, dtype=cudf.utils.utils.mask_dtype)
mask[0] = 0b00101101
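# Bits are read LSB-first: rows 0, 2, 3 and 5 are valid, rows 1 and 4 are null.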
masked = cudf.Series.from_masked_array(data, mask)
assert masked.null_count == 2
df["c"] = masked
# check data
values = masked.copy()
validids = [0, 2, 3, 5]
densearray = masked.to_array()
np.testing.assert_equal(data[validids], densearray)
# valid positions are correct
for i in validids:
assert data[i] == values[i]
# null position is correct
for i in range(len(values)):
if i not in validids:
assert values[i] is cudf.NA
pd.options.display.max_rows = 10
got = df.to_string()
expect = """
a b c
0 1 11 0
1 2 12 <NA>
2 3 13 2
3 4 14 3
4 5 15 <NA>
5 6 16 5
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_to_string_wide(monkeypatch):
monkeypatch.setenv("COLUMNS", "79")
# Test basic
df = cudf.DataFrame()
for i in range(100):
df["a{}".format(i)] = list(range(3))
pd.options.display.max_columns = 0
got = df.to_string()
expect = """
a0 a1 a2 a3 a4 a5 a6 a7 ... a92 a93 a94 a95 a96 a97 a98 a99
0 0 0 0 0 0 0 0 0 ... 0 0 0 0 0 0 0 0
1 1 1 1 1 1 1 1 1 ... 1 1 1 1 1 1 1 1
2 2 2 2 2 2 2 2 2 ... 2 2 2 2 2 2 2 2
[3 rows x 100 columns]
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_empty_to_string():
# Test for printing empty dataframe
df = cudf.DataFrame()
got = df.to_string()
expect = "Empty DataFrame\nColumns: []\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_emptycolumns_to_string():
# Test for printing dataframe having empty columns
df = cudf.DataFrame()
df["a"] = []
df["b"] = []
got = df.to_string()
expect = "Empty DataFrame\nColumns: [a, b]\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy():
# Test for copying the dataframe using python copy pkg
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = copy(df)
df2["b"] = [4, 5, 6]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy_shallow():
# Test for copy dataframe using class method
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = df.copy()
df2["b"] = [4, 2, 3]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_dtypes():
dtypes = pd.Series(
[np.int32, np.float32, np.float64], index=["c", "a", "b"]
)
df = cudf.DataFrame(
{k: np.ones(10, dtype=v) for k, v in dtypes.iteritems()}
)
assert df.dtypes.equals(dtypes)
def test_dataframe_add_col_to_object_dataframe():
# Test for adding column to an empty object dataframe
cols = ["a", "b", "c"]
df = pd.DataFrame(columns=cols, dtype="str")
data = {k: ["a"] for k in cols}
gdf = cudf.DataFrame(data)
gdf = gdf[:0]
assert gdf.dtypes.equals(df.dtypes)
gdf["a"] = [1]
df["a"] = [10]
assert gdf.dtypes.equals(df.dtypes)
gdf["b"] = [1.0]
df["b"] = [10.0]
assert gdf.dtypes.equals(df.dtypes)
def test_dataframe_dir_and_getattr():
df = cudf.DataFrame(
{
"a": np.ones(10),
"b": np.ones(10),
"not an id": np.ones(10),
"oop$": np.ones(10),
}
)
o = dir(df)
assert {"a", "b"}.issubset(o)
assert "not an id" not in o
assert "oop$" not in o
# Getattr works
assert df.a.equals(df["a"])
assert df.b.equals(df["b"])
with pytest.raises(AttributeError):
df.not_a_column
@pytest.mark.parametrize("order", ["C", "F"])
def test_empty_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
# Check fully empty dataframe.
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 0)
df = cudf.DataFrame()
nelem = 123
for k in "abc":
df[k] = np.random.random(nelem)
# Check all columns in empty dataframe.
mat = df.head(0).as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 3)
@pytest.mark.parametrize("order", ["C", "F"])
def test_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
nelem = 123
for k in "abcd":
df[k] = np.random.random(nelem)
# Check all columns
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (nelem, 4)
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
# Check column subset
mat = df.as_gpu_matrix(order=order, columns=["a", "c"]).copy_to_host()
assert mat.shape == (nelem, 2)
for i, k in enumerate("ac"):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
def test_dataframe_as_gpu_matrix_null_values():
df = cudf.DataFrame()
nelem = 123
na = -10000
refvalues = {}
for k in "abcd":
df[k] = data = np.random.random(nelem)
bitmask = utils.random_bitmask(nelem)
df[k] = df[k].set_mask(bitmask)
boolmask = np.asarray(
utils.expand_bits_to_bytes(bitmask)[:nelem], dtype=np.bool_
)
data[~boolmask] = na
refvalues[k] = data
# Check null value causes error
with pytest.raises(ValueError) as raises:
df.as_gpu_matrix()
raises.match("column 'a' has null values")
for k in df.columns:
df[k] = df[k].fillna(na)
mat = df.as_gpu_matrix().copy_to_host()
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(refvalues[k], mat[:, i])
def test_dataframe_append_empty():
pdf = pd.DataFrame(
{
"key": [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4],
"value": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
gdf["newcol"] = 100
pdf["newcol"] = 100
assert len(gdf["newcol"]) == len(pdf)
assert len(pdf["newcol"]) == len(pdf)
assert_eq(gdf, pdf)
def test_dataframe_setitem_from_masked_object():
ary = np.random.randn(100)
mask = np.zeros(100, dtype=bool)
mask[:20] = True
np.random.shuffle(mask)
ary[mask] = np.nan
test1_null = cudf.Series(ary, nan_as_null=True)
assert test1_null.nullable
assert test1_null.null_count == 20
test1_nan = cudf.Series(ary, nan_as_null=False)
assert test1_nan.null_count == 0
test2_null = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=True
)
assert test2_null["a"].nullable
assert test2_null["a"].null_count == 20
test2_nan = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=False
)
assert test2_nan["a"].null_count == 0
gpu_ary = cupy.asarray(ary)
test3_null = cudf.Series(gpu_ary, nan_as_null=True)
assert test3_null.nullable
assert test3_null.null_count == 20
test3_nan = cudf.Series(gpu_ary, nan_as_null=False)
assert test3_nan.null_count == 0
test4 = cudf.DataFrame()
lst = [1, 2, None, 4, 5, 6, None, 8, 9]
test4["lst"] = lst
assert test4["lst"].nullable
assert test4["lst"].null_count == 2
def test_dataframe_append_to_empty():
pdf = pd.DataFrame()
pdf["a"] = []
pdf["b"] = [1, 2, 3]
gdf = cudf.DataFrame()
gdf["a"] = []
gdf["b"] = [1, 2, 3]
assert_eq(gdf, pdf)
def test_dataframe_setitem_index_len1():
gdf = cudf.DataFrame()
gdf["a"] = [1]
gdf["b"] = gdf.index._values
np.testing.assert_equal(gdf.b.to_array(), [0])
def test_empty_dataframe_setitem_df():
gdf1 = cudf.DataFrame()
gdf2 = cudf.DataFrame({"a": [1, 2, 3, 4, 5]})
gdf1["a"] = gdf2["a"]
assert_eq(gdf1, gdf2)
def test_assign():
gdf = cudf.DataFrame({"x": [1, 2, 3]})
gdf2 = gdf.assign(y=gdf.x + 1)
assert list(gdf.columns) == ["x"]
assert list(gdf2.columns) == ["x", "y"]
np.testing.assert_equal(gdf2.y.to_array(), [2, 3, 4])
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000])
def test_dataframe_hash_columns(nrows):
gdf = cudf.DataFrame()
data = np.asarray(range(nrows))
data[0] = data[-1] # make first and last the same
gdf["a"] = data
gdf["b"] = gdf.a + 100
out = gdf.hash_columns(["a", "b"])
assert isinstance(out, cupy.ndarray)
assert len(out) == nrows
assert out.dtype == np.int32
# Check default
out_all = gdf.hash_columns()
np.testing.assert_array_equal(cupy.asnumpy(out), cupy.asnumpy(out_all))
# Check single column
out_one = cupy.asnumpy(gdf.hash_columns(["a"]))
# First matches last
assert out_one[0] == out_one[-1]
# Equivalent to the cudf.Series.hash_values()
np.testing.assert_array_equal(cupy.asnumpy(gdf.a.hash_values()), out_one)
@pytest.mark.parametrize("nrows", [3, 10, 100, 1000])
@pytest.mark.parametrize("nparts", [1, 2, 8, 13])
@pytest.mark.parametrize("nkeys", [1, 2])
def test_dataframe_hash_partition(nrows, nparts, nkeys):
np.random.seed(123)
gdf = cudf.DataFrame()
keycols = []
for i in range(nkeys):
keyname = "key{}".format(i)
gdf[keyname] = np.random.randint(0, 7 - i, nrows)
keycols.append(keyname)
gdf["val1"] = np.random.randint(0, nrows * 2, nrows)
got = gdf.partition_by_hash(keycols, nparts=nparts)
# Must return a list
assert isinstance(got, list)
# Must have correct number of partitions
assert len(got) == nparts
# All partitions must be DataFrame type
assert all(isinstance(p, cudf.DataFrame) for p in got)
# Check that all partitions have unique keys
part_unique_keys = set()
for p in got:
if len(p):
# Take rows of the keycolumns and build a set of the key-values
unique_keys = set(map(tuple, p.as_matrix(columns=keycols)))
# Ensure that none of the key-values have occurred in other groups
assert not (unique_keys & part_unique_keys)
part_unique_keys |= unique_keys
assert len(part_unique_keys)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_value(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["val"] = gdf["val"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.key])
expected_value = row.key + 100 if valid else np.nan
got_value = row.val
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_keys(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["key"] = gdf["key"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3, keep_index=False)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.val - 100])
# val is key + 100
expected_value = row.val - 100 if valid else np.nan
got_value = row.key
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("keep_index", [True, False])
def test_dataframe_hash_partition_keep_index(keep_index):
gdf = cudf.DataFrame(
{"val": [1, 2, 3, 4], "key": [3, 2, 1, 4]}, index=[4, 3, 2, 1]
)
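# With keep_index=True the original index values follow their rows into each
# partition; otherwise partitions fall back to a default RangeIndex.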
expected_df1 = cudf.DataFrame(
{"val": [1], "key": [3]}, index=[4] if keep_index else None
)
expected_df2 = cudf.DataFrame(
{"val": [2, 3, 4], "key": [2, 1, 4]},
index=[3, 2, 1] if keep_index else range(1, 4),
)
expected = [expected_df1, expected_df2]
parts = gdf.partition_by_hash(["key"], nparts=2, keep_index=keep_index)
for exp, got in zip(expected, parts):
assert_eq(exp, got)
def test_dataframe_hash_partition_empty():
gdf = cudf.DataFrame({"val": [1, 2], "key": [3, 2]}, index=["a", "b"])
parts = gdf.iloc[:0].partition_by_hash(["key"], nparts=3)
assert len(parts) == 3
for part in parts:
assert_eq(gdf.iloc[:0], part)
@pytest.mark.parametrize("dtype1", utils.supported_numpy_dtypes)
@pytest.mark.parametrize("dtype2", utils.supported_numpy_dtypes)
def test_dataframe_concat_different_numerical_columns(dtype1, dtype2):
df1 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype1)))
df2 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype2)))
if dtype1 != dtype2 and ("datetime" in dtype1 or "datetime" in dtype2):
with pytest.raises(TypeError):
cudf.concat([df1, df2])
else:
pres = pd.concat([df1, df2])
gres = cudf.concat([cudf.from_pandas(df1), cudf.from_pandas(df2)])
assert_eq(cudf.from_pandas(pres), gres)
def test_dataframe_concat_different_column_types():
df1 = cudf.Series([42], dtype=np.float64)
df2 = cudf.Series(["a"], dtype="category")
with pytest.raises(ValueError):
cudf.concat([df1, df2])
df2 = cudf.Series(["a string"])
with pytest.raises(TypeError):
cudf.concat([df1, df2])
@pytest.mark.parametrize(
"df_1", [cudf.DataFrame({"a": [1, 2], "b": [1, 3]}), cudf.DataFrame({})]
)
@pytest.mark.parametrize(
"df_2", [cudf.DataFrame({"a": [], "b": []}), cudf.DataFrame({})]
)
def test_concat_empty_dataframe(df_1, df_2):
got = cudf.concat([df_1, df_2])
expect = pd.concat([df_1.to_pandas(), df_2.to_pandas()], sort=False)
# ignoring dtypes as pandas upcasts int to float
# on concatenation with empty dataframes
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"df1_d",
[
{"a": [1, 2], "b": [1, 2], "c": ["s1", "s2"], "d": [1.0, 2.0]},
{"b": [1.9, 10.9], "c": ["s1", "s2"]},
{"c": ["s1"], "b": [None], "a": [False]},
],
)
@pytest.mark.parametrize(
"df2_d",
[
{"a": [1, 2, 3]},
{"a": [1, None, 3], "b": [True, True, False], "c": ["s3", None, "s4"]},
{"a": [], "b": []},
{},
],
)
def test_concat_different_column_dataframe(df1_d, df2_d):
got = cudf.concat(
[cudf.DataFrame(df1_d), cudf.DataFrame(df2_d), cudf.DataFrame(df1_d)],
sort=False,
)
expect = pd.concat(
[pd.DataFrame(df1_d), pd.DataFrame(df2_d), pd.DataFrame(df1_d)],
sort=False,
)
# cudf.DataFrame.to_pandas() upcasts numerical columns to float and casts
# NaN to 0 in non-float numerical columns, so normalize the cudf result to
# float64 with NaN before comparing.
numeric_cols = got.dtypes[got.dtypes != "object"].index
for col in numeric_cols:
got[col] = got[col].astype(np.float64).fillna(np.nan)
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"ser_1", [pd.Series([1, 2, 3]), pd.Series([], dtype="float64")]
)
@pytest.mark.parametrize("ser_2", [pd.Series([], dtype="float64")])
def test_concat_empty_series(ser_1, ser_2):
got = cudf.concat([cudf.Series(ser_1), cudf.Series(ser_2)])
expect = pd.concat([ser_1, ser_2])
assert_eq(got, expect)
def test_concat_with_axis():
df1 = pd.DataFrame(dict(x=np.arange(5), y=np.arange(5)))
df2 = pd.DataFrame(dict(a=np.arange(5), b=np.arange(5)))
concat_df = pd.concat([df1, df2], axis=1)
cdf1 = cudf.from_pandas(df1)
cdf2 = cudf.from_pandas(df2)
# concat only dataframes
concat_cdf = cudf.concat([cdf1, cdf2], axis=1)
assert_eq(concat_cdf, concat_df)
# concat only series
concat_s = pd.concat([df1.x, df1.y], axis=1)
cs1 = cudf.Series.from_pandas(df1.x)
cs2 = cudf.Series.from_pandas(df1.y)
concat_cdf_s = cudf.concat([cs1, cs2], axis=1)
assert_eq(concat_cdf_s, concat_s)
# concat series and dataframes
s3 = pd.Series(np.random.random(5))
cs3 = cudf.Series.from_pandas(s3)
concat_cdf_all = cudf.concat([cdf1, cs3, cdf2], axis=1)
concat_df_all = pd.concat([df1, s3, df2], axis=1)
assert_eq(concat_cdf_all, concat_df_all)
# concat manual multi index
midf1 = cudf.from_pandas(df1)
midf1.index = cudf.MultiIndex(
levels=[[0, 1, 2, 3], [0, 1]], codes=[[0, 1, 2, 3, 2], [0, 1, 0, 1, 0]]
)
midf2 = midf1[2:]
midf2.index = cudf.MultiIndex(
levels=[[3, 4, 5], [2, 0]], codes=[[0, 1, 2], [1, 0, 1]]
)
mipdf1 = midf1.to_pandas()
mipdf2 = midf2.to_pandas()
assert_eq(cudf.concat([midf1, midf2]), pd.concat([mipdf1, mipdf2]))
assert_eq(cudf.concat([midf2, midf1]), pd.concat([mipdf2, mipdf1]))
assert_eq(
cudf.concat([midf1, midf2, midf1]), pd.concat([mipdf1, mipdf2, mipdf1])
)
# concat groupby multi index
gdf1 = cudf.DataFrame(
{
"x": np.random.randint(0, 10, 10),
"y": np.random.randint(0, 10, 10),
"z": np.random.randint(0, 10, 10),
"v": np.random.randint(0, 10, 10),
}
)
gdf2 = gdf1[5:]
gdg1 = gdf1.groupby(["x", "y"]).min()
gdg2 = gdf2.groupby(["x", "y"]).min()
pdg1 = gdg1.to_pandas()
pdg2 = gdg2.to_pandas()
assert_eq(cudf.concat([gdg1, gdg2]), pd.concat([pdg1, pdg2]))
assert_eq(cudf.concat([gdg2, gdg1]), pd.concat([pdg2, pdg1]))
# series multi index concat
gdgz1 = gdg1.z
gdgz2 = gdg2.z
pdgz1 = gdgz1.to_pandas()
pdgz2 = gdgz2.to_pandas()
assert_eq(cudf.concat([gdgz1, gdgz2]), pd.concat([pdgz1, pdgz2]))
assert_eq(cudf.concat([gdgz2, gdgz1]), pd.concat([pdgz2, pdgz1]))
@pytest.mark.parametrize("nrows", [0, 3, 10, 100, 1000])
def test_nonmatching_index_setitem(nrows):
np.random.seed(0)
gdf = cudf.DataFrame()
gdf["a"] = np.random.randint(2147483647, size=nrows)
gdf["b"] = np.random.randint(2147483647, size=nrows)
gdf = gdf.set_index("b")
test_values = np.random.randint(2147483647, size=nrows)
gdf["c"] = test_values
assert len(test_values) == len(gdf["c"])
assert (
gdf["c"]
.to_pandas()
.equals(cudf.Series(test_values).set_index(gdf._index).to_pandas())
)
def test_from_pandas():
df = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
gdf = cudf.DataFrame.from_pandas(df)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = df.x
gs = cudf.Series.from_pandas(s)
assert isinstance(gs, cudf.Series)
assert_eq(s, gs)
@pytest.mark.parametrize("dtypes", [int, float])
def test_from_records(dtypes):
h_ary = np.ndarray(shape=(10, 4), dtype=dtypes)
rec_ary = h_ary.view(np.recarray)
gdf = cudf.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
df = pd.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame.from_records(rec_ary)
df = pd.DataFrame.from_records(rec_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
@pytest.mark.parametrize("columns", [None, ["first", "second", "third"]])
@pytest.mark.parametrize(
"index",
[
None,
["first", "second"],
"name",
"age",
"weight",
[10, 11],
["abc", "xyz"],
],
)
def test_from_records_index(columns, index):
rec_ary = np.array(
[("Rex", 9, 81.0), ("Fido", 3, 27.0)],
dtype=[("name", "U10"), ("age", "i4"), ("weight", "f4")],
)
gdf = cudf.DataFrame.from_records(rec_ary, columns=columns, index=index)
df = pd.DataFrame.from_records(rec_ary, columns=columns, index=index)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_construction_from_cupy_arrays():
h_ary = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
d_ary = cupy.asarray(h_ary)
gdf = cudf.DataFrame(d_ary, columns=["a", "b", "c"])
df = pd.DataFrame(h_ary, columns=["a", "b", "c"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
df = pd.DataFrame(h_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary, index=["a", "b"])
df = pd.DataFrame(h_ary, index=["a", "b"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=0, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=0, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=1, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=1, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_cupy_wrong_dimensions():
d_ary = cupy.empty((2, 3, 4), dtype=np.int32)
with pytest.raises(
ValueError, match="records dimension expected 1 or 2 but found: 3"
):
cudf.DataFrame(d_ary)
def test_dataframe_cupy_array_wrong_index():
d_ary = cupy.empty((2, 3), dtype=np.int32)
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index=["a"])
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index="a")
def test_index_in_dataframe_constructor():
a = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
b = cudf.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
assert_eq(a, b)
assert_eq(a.loc[4:], b.loc[4:])
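# dtype list shared by the Arrow interop, transpose, and other parametrized tests below.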
dtypes = NUMERIC_TYPES + DATETIME_TYPES + ["bool"]
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
padf = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
gdf = cudf.DataFrame.from_arrow(padf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = pa.Array.from_pandas(df.a)
gs = cudf.Series.from_arrow(s)
assert isinstance(gs, cudf.Series)
# For some reason PyArrow to_pandas() converts to numpy array and has
# better type compatibility
np.testing.assert_array_equal(s.to_pandas(), gs.to_array())
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_to_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
pa_i = pa.Array.from_pandas(df.index)
pa_gi = gdf.index.to_arrow()
assert isinstance(pa_gi, pa.Array)
assert pa.Array.equals(pa_i, pa_gi)
@pytest.mark.parametrize("data_type", dtypes)
def test_to_from_arrow_nulls(data_type):
if data_type == "longlong":
data_type = "int64"
if data_type == "bool":
s1 = pa.array([True, None, False, None, True], type=data_type)
else:
dtype = np.dtype(data_type)
if dtype.type == np.datetime64:
time_unit, _ = np.datetime_data(dtype)
data_type = pa.timestamp(unit=time_unit)
s1 = pa.array([1, None, 3, None, 5], type=data_type)
gs1 = cudf.Series.from_arrow(s1)
assert isinstance(gs1, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s1.buffers()[0]).view("u1")[0],
gs1._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s1, gs1.to_arrow())
s2 = pa.array([None, None, None, None, None], type=data_type)
gs2 = cudf.Series.from_arrow(s2)
assert isinstance(gs2, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s2.buffers()[0]).view("u1")[0],
gs2._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s2, gs2.to_arrow())
def test_to_arrow_categorical():
df = pd.DataFrame()
df["a"] = pd.Series(["a", "b", "c"], dtype="category")
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
def test_from_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert_eq(
pd.Series(pa_cat.to_pandas()), # PyArrow returns a pd.Categorical
gd_cat.to_pandas(),
)
def test_to_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert pa.Array.equals(pa_cat, gd_cat.to_arrow())
@pytest.mark.parametrize("data_type", dtypes)
def test_from_scalar_typing(data_type):
if data_type == "datetime64[ms]":
scalar = (
np.dtype("int64")
.type(np.random.randint(0, 5))
.astype("datetime64[ms]")
)
elif data_type.startswith("datetime64"):
scalar = np.datetime64(datetime.date.today()).astype("datetime64[ms]")
data_type = "datetime64[ms]"
else:
scalar = np.dtype(data_type).type(np.random.randint(0, 5))
gdf = cudf.DataFrame()
gdf["a"] = [1, 2, 3, 4, 5]
gdf["b"] = scalar
assert gdf["b"].dtype == np.dtype(data_type)
assert len(gdf["b"]) == len(gdf["a"])
@pytest.mark.parametrize("data_type", NUMERIC_TYPES)
def test_from_python_array(data_type):
np_arr = np.random.randint(0, 100, 10).astype(data_type)
data = memoryview(np_arr)
data = arr.array(data.format, data)
gs = cudf.Series(data)
np.testing.assert_equal(gs.to_array(), np_arr)
def test_series_shape():
ps = pd.Series([1, 2, 3, 4])
cs = cudf.Series([1, 2, 3, 4])
assert ps.shape == cs.shape
def test_series_shape_empty():
ps = pd.Series(dtype="float64")
cs = cudf.Series([])
assert ps.shape == cs.shape
def test_dataframe_shape():
pdf = pd.DataFrame({"a": [0, 1, 2, 3], "b": [0.1, 0.2, None, 0.3]})
gdf = cudf.DataFrame.from_pandas(pdf)
assert pdf.shape == gdf.shape
def test_dataframe_shape_empty():
pdf = pd.DataFrame()
gdf = cudf.DataFrame()
assert pdf.shape == gdf.shape
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
@pytest.mark.parametrize("dtype", dtypes)
@pytest.mark.parametrize("nulls", ["none", "some", "all"])
def test_dataframe_transpose(nulls, num_cols, num_rows, dtype):
pdf = pd.DataFrame()
null_rep = np.nan if dtype in ["float32", "float64"] else None
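# NaN marks nulls in float columns; None is needed for all other dtypes.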
for i in range(num_cols):
colname = string.ascii_lowercase[i]
data = pd.Series(np.random.randint(0, 26, num_rows).astype(dtype))
if nulls == "some":
idx = np.random.choice(
num_rows, size=int(num_rows / 2), replace=False
)
data[idx] = null_rep
elif nulls == "all":
data[:] = null_rep
pdf[colname] = data
gdf = cudf.DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function)
assert_eq(expect, got_property)
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
def test_dataframe_transpose_category(num_cols, num_rows):
pdf = pd.DataFrame()
for i in range(num_cols):
colname = string.ascii_lowercase[i]
data = pd.Series(list(string.ascii_lowercase), dtype="category")
data = data.sample(num_rows, replace=True).reset_index(drop=True)
pdf[colname] = data
gdf = cudf.DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function.to_pandas())
assert_eq(expect, got_property.to_pandas())
def test_generated_column():
gdf = cudf.DataFrame({"a": (i for i in range(5))})
assert len(gdf) == 5
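# Shared fixtures: a simple two-column frame reused by many of the tests below.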
@pytest.fixture
def pdf():
return pd.DataFrame({"x": range(10), "y": range(10)})
@pytest.fixture
def gdf(pdf):
return cudf.DataFrame.from_pandas(pdf)
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize(
"func",
[
lambda df, **kwargs: df.min(**kwargs),
lambda df, **kwargs: df.max(**kwargs),
lambda df, **kwargs: df.sum(**kwargs),
lambda df, **kwargs: df.product(**kwargs),
lambda df, **kwargs: df.cummin(**kwargs),
lambda df, **kwargs: df.cummax(**kwargs),
lambda df, **kwargs: df.cumsum(**kwargs),
lambda df, **kwargs: df.cumprod(**kwargs),
lambda df, **kwargs: df.mean(**kwargs),
lambda df, **kwargs: df.sum(**kwargs),
lambda df, **kwargs: df.max(**kwargs),
lambda df, **kwargs: df.std(ddof=1, **kwargs),
lambda df, **kwargs: df.var(ddof=1, **kwargs),
lambda df, **kwargs: df.std(ddof=2, **kwargs),
lambda df, **kwargs: df.var(ddof=2, **kwargs),
lambda df, **kwargs: df.kurt(**kwargs),
lambda df, **kwargs: df.skew(**kwargs),
lambda df, **kwargs: df.all(**kwargs),
lambda df, **kwargs: df.any(**kwargs),
],
)
@pytest.mark.parametrize("skipna", [True, False, None])
def test_dataframe_reductions(data, func, skipna):
pdf = pd.DataFrame(data=data)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(func(pdf, skipna=skipna), func(gdf, skipna=skipna))
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize("func", [lambda df: df.count()])
def test_dataframe_count_reduction(data, func):
pdf = pd.DataFrame(data=data)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(func(pdf), func(gdf))
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize("ops", ["sum", "product", "prod"])
@pytest.mark.parametrize("skipna", [True, False, None])
@pytest.mark.parametrize("min_count", [-10, -1, 0, 1, 2, 3, 10])
def test_dataframe_min_count_ops(data, ops, skipna, min_count):
psr = pd.DataFrame(data)
gsr = cudf.DataFrame(data)
if PANDAS_GE_120 and psr.shape[0] * psr.shape[1] < min_count:
pytest.xfail("https://github.com/pandas-dev/pandas/issues/39738")
assert_eq(
getattr(psr, ops)(skipna=skipna, min_count=min_count),
getattr(gsr, ops)(skipna=skipna, min_count=min_count),
check_dtype=False,
)
@pytest.mark.parametrize(
"binop",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_binops_df(pdf, gdf, binop):
pdf = pdf + 1.0
gdf = gdf + 1.0
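# Offset by 1.0 so the mod/floordiv/truediv cases never divide by zero.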
d = binop(pdf, pdf)
g = binop(gdf, gdf)
assert_eq(d, g)
@pytest.mark.parametrize("binop", [operator.and_, operator.or_, operator.xor])
def test_bitwise_binops_df(pdf, gdf, binop):
d = binop(pdf, pdf + 1)
g = binop(gdf, gdf + 1)
assert_eq(d, g)
@pytest.mark.parametrize(
"binop",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_binops_series(pdf, gdf, binop):
pdf = pdf + 1.0
gdf = gdf + 1.0
d = binop(pdf.x, pdf.y)
g = binop(gdf.x, gdf.y)
assert_eq(d, g)
@pytest.mark.parametrize("binop", [operator.and_, operator.or_, operator.xor])
def test_bitwise_binops_series(pdf, gdf, binop):
d = binop(pdf.x, pdf.y + 1)
g = binop(gdf.x, gdf.y + 1)
assert_eq(d, g)
@pytest.mark.parametrize("unaryop", [operator.neg, operator.inv, operator.abs])
def test_unaryops_df(pdf, gdf, unaryop):
d = unaryop(pdf - 5)
g = unaryop(gdf - 5)
assert_eq(d, g)
@pytest.mark.parametrize(
"func",
[
lambda df: df.empty,
lambda df: df.x.empty,
lambda df: df.x.fillna(123, limit=None, method=None, axis=None),
lambda df: df.drop("x", axis=1, errors="raise"),
],
)
def test_unary_operators(func, pdf, gdf):
p = func(pdf)
g = func(gdf)
assert_eq(p, g)
def test_is_monotonic(gdf):
pdf = pd.DataFrame({"x": [1, 2, 3]}, index=[3, 1, 2])
gdf = cudf.DataFrame.from_pandas(pdf)
assert not gdf.index.is_monotonic
assert not gdf.index.is_monotonic_increasing
assert not gdf.index.is_monotonic_decreasing
def test_iter(pdf, gdf):
assert list(pdf) == list(gdf)
def test_iteritems(gdf):
for k, v in gdf.iteritems():
assert k in gdf.columns
assert isinstance(v, cudf.Series)
assert_eq(v, gdf[k])
@pytest.mark.parametrize("q", [0.5, 1, 0.001, [0.5], [], [0.005, 0.5, 1]])
@pytest.mark.parametrize("numeric_only", [True, False])
def test_quantile(q, numeric_only):
ts = pd.date_range("2018-08-24", periods=5, freq="D")
td = pd.to_timedelta(np.arange(5), unit="h")
pdf = pd.DataFrame(
{"date": ts, "delta": td, "val": np.random.randn(len(ts))}
)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(pdf["date"].quantile(q), gdf["date"].quantile(q))
assert_eq(pdf["delta"].quantile(q), gdf["delta"].quantile(q))
assert_eq(pdf["val"].quantile(q), gdf["val"].quantile(q))
if numeric_only:
assert_eq(pdf.quantile(q), gdf.quantile(q))
else:
q = q if isinstance(q, list) else [q]
assert_eq(
pdf.quantile(q, numeric_only=False),
gdf.quantile(q, numeric_only=False),
)
def test_empty_quantile():
pdf = pd.DataFrame({"x": []})
df = cudf.DataFrame({"x": []})
actual = df.quantile()
expected = pdf.quantile()
assert_eq(actual, expected)
def test_from_pandas_function(pdf):
gdf = cudf.from_pandas(pdf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(pdf, gdf)
gdf = cudf.from_pandas(pdf.x)
assert isinstance(gdf, cudf.Series)
assert_eq(pdf.x, gdf)
with pytest.raises(TypeError):
cudf.from_pandas(123)
@pytest.mark.parametrize("preserve_index", [True, False])
def test_arrow_pandas_compat(pdf, gdf, preserve_index):
pdf["z"] = range(10)
pdf = pdf.set_index("z")
gdf["z"] = range(10)
gdf = gdf.set_index("z")
pdf_arrow_table = pa.Table.from_pandas(pdf, preserve_index=preserve_index)
gdf_arrow_table = gdf.to_arrow(preserve_index=preserve_index)
assert pa.Table.equals(pdf_arrow_table, gdf_arrow_table)
gdf2 = cudf.DataFrame.from_arrow(pdf_arrow_table)
pdf2 = pdf_arrow_table.to_pandas()
assert_eq(pdf2, gdf2)
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000, 100000])
def test_series_hash_encode(nrows):
data = np.asarray(range(nrows))
# Python's hash of a string name varies between runs, which can make
# enc_with_name_arr and enc_arr come out identical. Using an integer
# name keeps the hash value constant across runs.
s = cudf.Series(data, name=1)
num_features = 1000
encoded_series = s.hash_encode(num_features)
assert isinstance(encoded_series, cudf.Series)
enc_arr = encoded_series.to_array()
assert np.all(enc_arr >= 0)
assert np.max(enc_arr) < num_features
enc_with_name_arr = s.hash_encode(num_features, use_name=True).to_array()
assert enc_with_name_arr[0] != enc_arr[0]
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"])
def test_cuda_array_interface(dtype):
np_data = np.arange(10).astype(dtype)
cupy_data = cupy.array(np_data)
pd_data = pd.Series(np_data)
cudf_data = cudf.Series(cupy_data)
assert_eq(pd_data, cudf_data)
gdf = cudf.DataFrame()
gdf["test"] = cupy_data
pd_data.name = "test"
assert_eq(pd_data, gdf["test"])
@pytest.mark.parametrize("nelem", [0, 2, 3, 100])
@pytest.mark.parametrize("nchunks", [1, 2, 5, 10])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow_chunked_arrays(nelem, nchunks, data_type):
np_list_data = [
np.random.randint(0, 100, nelem).astype(data_type)
for i in range(nchunks)
]
pa_chunk_array = pa.chunked_array(np_list_data)
expect = pd.Series(pa_chunk_array.to_pandas())
got = cudf.Series(pa_chunk_array)
assert_eq(expect, got)
np_list_data2 = [
np.random.randint(0, 100, nelem).astype(data_type)
for i in range(nchunks)
]
pa_chunk_array2 = pa.chunked_array(np_list_data2)
pa_table = pa.Table.from_arrays(
[pa_chunk_array, pa_chunk_array2], names=["a", "b"]
)
expect = pa_table.to_pandas()
got = cudf.DataFrame.from_arrow(pa_table)
assert_eq(expect, got)
@pytest.mark.skip(reason="Test was designed to be run in isolation")
def test_gpu_memory_usage_with_boolmask():
ctx = cuda.current_context()
def query_GPU_memory(note=""):
memInfo = ctx.get_memory_info()
usedMemoryGB = (memInfo.total - memInfo.free) / 1e9
return usedMemoryGB
cuda.current_context().deallocations.clear()
nRows = int(1e8)
nCols = 2
dataNumpy = np.asfortranarray(np.random.rand(nRows, nCols))
colNames = ["col" + str(iCol) for iCol in range(nCols)]
pandasDF = pd.DataFrame(data=dataNumpy, columns=colNames, dtype=np.float32)
cudaDF = cudf.core.DataFrame.from_pandas(pandasDF)
boolmask = cudf.Series(np.random.randint(1, 2, len(cudaDF)).astype("bool"))
memory_used = query_GPU_memory()
cudaDF = cudaDF[boolmask]
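# Boolean masking should leave both columns sharing a single index buffer
# and should not change total GPU memory usage.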
assert (
cudaDF.index._values.data_array_view.device_ctypes_pointer
== cudaDF["col0"].index._values.data_array_view.device_ctypes_pointer
)
assert (
cudaDF.index._values.data_array_view.device_ctypes_pointer
== cudaDF["col1"].index._values.data_array_view.device_ctypes_pointer
)
assert memory_used == query_GPU_memory()
def test_boolmask(pdf, gdf):
boolmask = np.random.randint(0, 2, len(pdf)) > 0
gdf = gdf[boolmask]
pdf = pdf[boolmask]
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"mask_shape",
[
(2, "ab"),
(2, "abc"),
(3, "ab"),
(3, "abc"),
(3, "abcd"),
(4, "abc"),
(4, "abcd"),
],
)
def test_dataframe_boolmask(mask_shape):
pdf = pd.DataFrame()
for col in "abc":
pdf[col] = np.random.randint(0, 10, 3)
pdf_mask = pd.DataFrame()
for col in mask_shape[1]:
pdf_mask[col] = np.random.randint(0, 2, mask_shape[0]) > 0
gdf = cudf.DataFrame.from_pandas(pdf)
gdf_mask = cudf.DataFrame.from_pandas(pdf_mask)
gdf = gdf[gdf_mask]
pdf = pdf[pdf_mask]
assert np.array_equal(gdf.columns, pdf.columns)
for col in gdf.columns:
assert np.array_equal(
gdf[col].fillna(-1).to_pandas().values, pdf[col].fillna(-1).values
)
@pytest.mark.parametrize(
"mask",
[
[True, False, True],
pytest.param(
cudf.Series([True, False, True]),
marks=pytest.mark.xfail(
reason="Pandas can't index a multiindex with a Series"
),
),
],
)
def test_dataframe_multiindex_boolmask(mask):
gdf = cudf.DataFrame(
{"w": [3, 2, 1], "x": [1, 2, 3], "y": [0, 1, 0], "z": [1, 1, 1]}
)
gdg = gdf.groupby(["w", "x"]).count()
pdg = gdg.to_pandas()
assert_eq(gdg[mask], pdg[mask])
def test_dataframe_assignment():
pdf = pd.DataFrame()
for col in "abc":
pdf[col] = np.array([0, 1, 1, -2, 10])
gdf = cudf.DataFrame.from_pandas(pdf)
gdf[gdf < 0] = 999
pdf[pdf < 0] = 999
assert_eq(gdf, pdf)
def test_1row_arrow_table():
data = [pa.array([0]), pa.array([1])]
batch = pa.RecordBatch.from_arrays(data, ["f0", "f1"])
table = pa.Table.from_batches([batch])
expect = table.to_pandas()
got = cudf.DataFrame.from_arrow(table)
assert_eq(expect, got)
def test_arrow_handle_no_index_name(pdf, gdf):
gdf_arrow = gdf.to_arrow()
pdf_arrow = pa.Table.from_pandas(pdf)
assert pa.Table.equals(pdf_arrow, gdf_arrow)
got = cudf.DataFrame.from_arrow(gdf_arrow)
expect = pdf_arrow.to_pandas()
assert_eq(expect, got)
@pytest.mark.parametrize("num_rows", [1, 3, 10, 100])
@pytest.mark.parametrize("num_bins", [1, 2, 4, 20])
@pytest.mark.parametrize("right", [True, False])
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"])
@pytest.mark.parametrize("series_bins", [True, False])
def test_series_digitize(num_rows, num_bins, right, dtype, series_bins):
data = np.random.randint(0, 100, num_rows).astype(dtype)
bins = np.unique(np.sort(np.random.randint(2, 95, num_bins).astype(dtype)))
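# np.digitize expects monotonically increasing bins, hence the sort/unique.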
s = cudf.Series(data)
if series_bins:
s_bins = cudf.Series(bins)
indices = s.digitize(s_bins, right)
else:
indices = s.digitize(bins, right)
np.testing.assert_array_equal(
np.digitize(data, bins, right), indices.to_array()
)
def test_series_digitize_invalid_bins():
s = cudf.Series(np.random.randint(0, 30, 80), dtype="int32")
bins = cudf.Series([2, None, None, 50, 90], dtype="int32")
with pytest.raises(
ValueError, match="`bins` cannot contain null entries."
):
_ = s.digitize(bins)
def test_pandas_non_contiguous():
arr1 = np.random.sample([5000, 10])
assert arr1.flags["C_CONTIGUOUS"] is True
df = pd.DataFrame(arr1)
for col in df.columns:
assert df[col].values.flags["C_CONTIGUOUS"] is False
gdf = cudf.DataFrame.from_pandas(df)
assert_eq(gdf.to_pandas(), df)
@pytest.mark.parametrize("num_elements", [0, 2, 10, 100])
@pytest.mark.parametrize("null_type", [np.nan, None, "mixed"])
def test_series_all_null(num_elements, null_type):
if null_type == "mixed":
data = []
data1 = [np.nan] * int(num_elements / 2)
data2 = [None] * int(num_elements / 2)
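# Interleave NaN and None so both null representations appear in one column.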
for idx in range(len(data1)):
data.append(data1[idx])
data.append(data2[idx])
else:
data = [null_type] * num_elements
# Typecast Pandas because None will return `object` dtype
expect = pd.Series(data, dtype="float64")
got = cudf.Series(data)
assert_eq(expect, got)
@pytest.mark.parametrize("num_elements", [0, 2, 10, 100])
def test_series_all_valid_nan(num_elements):
data = [np.nan] * num_elements
sr = cudf.Series(data, nan_as_null=False)
np.testing.assert_equal(sr.null_count, 0)
def test_series_rename():
pds = pd.Series([1, 2, 3], name="asdf")
gds = cudf.Series([1, 2, 3], name="asdf")
expect = pds.rename("new_name")
got = gds.rename("new_name")
assert_eq(expect, got)
pds = pd.Series(expect)
gds = cudf.Series(got)
assert_eq(pds, gds)
pds = pd.Series(expect, name="name name")
gds = cudf.Series(got, name="name name")
assert_eq(pds, gds)
@pytest.mark.parametrize("data_type", dtypes)
@pytest.mark.parametrize("nelem", [0, 100])
def test_head_tail(nelem, data_type):
def check_index_equality(left, right):
assert left.index.equals(right.index)
def check_values_equality(left, right):
if len(left) == 0 and len(right) == 0:
return None
np.testing.assert_array_equal(left.to_pandas(), right.to_pandas())
def check_frame_series_equality(left, right):
check_index_equality(left, right)
check_values_equality(left, right)
gdf = cudf.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
check_frame_series_equality(gdf.head(), gdf[:5])
check_frame_series_equality(gdf.head(3), gdf[:3])
check_frame_series_equality(gdf.head(-2), gdf[:-2])
check_frame_series_equality(gdf.head(0), gdf[0:0])
check_frame_series_equality(gdf["a"].head(), gdf["a"][:5])
check_frame_series_equality(gdf["a"].head(3), gdf["a"][:3])
check_frame_series_equality(gdf["a"].head(-2), gdf["a"][:-2])
check_frame_series_equality(gdf.tail(), gdf[-5:])
check_frame_series_equality(gdf.tail(3), gdf[-3:])
check_frame_series_equality(gdf.tail(-2), gdf[2:])
check_frame_series_equality(gdf.tail(0), gdf[0:0])
check_frame_series_equality(gdf["a"].tail(), gdf["a"][-5:])
check_frame_series_equality(gdf["a"].tail(3), gdf["a"][-3:])
check_frame_series_equality(gdf["a"].tail(-2), gdf["a"][2:])
def test_tail_for_string():
gdf = cudf.DataFrame()
gdf["id"] = cudf.Series(["a", "b"], dtype=np.object_)
gdf["v"] = cudf.Series([1, 2])
assert_eq(gdf.tail(3), gdf.to_pandas().tail(3))
@pytest.mark.parametrize("drop", [True, False])
def test_reset_index(pdf, gdf, drop):
assert_eq(
pdf.reset_index(drop=drop, inplace=False),
gdf.reset_index(drop=drop, inplace=False),
)
assert_eq(
pdf.x.reset_index(drop=drop, inplace=False),
gdf.x.reset_index(drop=drop, inplace=False),
)
@pytest.mark.parametrize("drop", [True, False])
def test_reset_named_index(pdf, gdf, drop):
pdf.index.name = "cudf"
gdf.index.name = "cudf"
assert_eq(
pdf.reset_index(drop=drop, inplace=False),
gdf.reset_index(drop=drop, inplace=False),
)
assert_eq(
pdf.x.reset_index(drop=drop, inplace=False),
gdf.x.reset_index(drop=drop, inplace=False),
)
@pytest.mark.parametrize("drop", [True, False])
def test_reset_index_inplace(pdf, gdf, drop):
pdf.reset_index(drop=drop, inplace=True)
gdf.reset_index(drop=drop, inplace=True)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"data",
[
{
"a": [1, 2, 3, 4, 5],
"b": ["a", "b", "c", "d", "e"],
"c": [1.0, 2.0, 3.0, 4.0, 5.0],
}
],
)
@pytest.mark.parametrize(
"index",
[
"a",
["a", "b"],
pd.CategoricalIndex(["I", "II", "III", "IV", "V"]),
pd.Series(["h", "i", "k", "l", "m"]),
["b", pd.Index(["I", "II", "III", "IV", "V"])],
["c", [11, 12, 13, 14, 15]],
pd.MultiIndex(
levels=[
["I", "II", "III", "IV", "V"],
["one", "two", "three", "four", "five"],
],
codes=[[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]],
names=["col1", "col2"],
),
pd.RangeIndex(0, 5), # corner case
[pd.Series(["h", "i", "k", "l", "m"]), pd.RangeIndex(0, 5)],
[
pd.MultiIndex(
levels=[
["I", "II", "III", "IV", "V"],
["one", "two", "three", "four", "five"],
],
codes=[[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]],
names=["col1", "col2"],
),
pd.RangeIndex(0, 5),
],
],
)
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
def test_set_index(data, index, drop, append, inplace):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
expected = pdf.set_index(index, inplace=inplace, drop=drop, append=append)
actual = gdf.set_index(index, inplace=inplace, drop=drop, append=append)
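# With inplace=True both calls return None, so compare the mutated frames instead.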
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"data",
[
{
"a": [1, 1, 2, 2, 5],
"b": ["a", "b", "c", "d", "e"],
"c": [1.0, 2.0, 3.0, 4.0, 5.0],
}
],
)
@pytest.mark.parametrize("index", ["a", pd.Index([1, 1, 2, 2, 3])])
@pytest.mark.parametrize("verify_integrity", [True])
@pytest.mark.xfail
def test_set_index_verify_integrity(data, index, verify_integrity):
gdf = cudf.DataFrame(data)
gdf.set_index(index, verify_integrity=verify_integrity)
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("nelem", [10, 200, 1333])
def test_set_index_multi(drop, nelem):
np.random.seed(0)
a = np.arange(nelem)
np.random.shuffle(a)
df = pd.DataFrame(
{
"a": a,
"b": np.random.randint(0, 4, size=nelem),
"c": np.random.uniform(low=0, high=4, size=nelem),
"d": np.random.choice(["green", "black", "white"], nelem),
}
)
df["e"] = df["d"].astype("category")
gdf = cudf.DataFrame.from_pandas(df)
assert_eq(gdf.set_index("a", drop=drop), gdf.set_index(["a"], drop=drop))
assert_eq(
df.set_index(["b", "c"], drop=drop),
gdf.set_index(["b", "c"], drop=drop),
)
assert_eq(
df.set_index(["d", "b"], drop=drop),
gdf.set_index(["d", "b"], drop=drop),
)
assert_eq(
df.set_index(["b", "d", "e"], drop=drop),
gdf.set_index(["b", "d", "e"], drop=drop),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_0(copy):
# TODO (ptaylor): pandas changes `int` dtype to `float64`
# when reindexing and filling new label indices with NaN
gdf = cudf.datasets.randomdata(
nrows=6,
dtypes={
"a": "category",
# 'b': int,
"c": float,
"d": str,
},
)
pdf = gdf.to_pandas()
# Validate reindex returns a copy unmodified
assert_eq(pdf.reindex(copy=True), gdf.reindex(copy=copy))
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_1(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis defaults to 0
assert_eq(pdf.reindex(index, copy=True), gdf.reindex(index, copy=copy))
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_2(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis=0
assert_eq(
pdf.reindex(index, axis=0, copy=True),
gdf.reindex(index, axis=0, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_3(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as columns when axis=1
assert_eq(
pdf.reindex(columns, axis=1, copy=True),
gdf.reindex(columns, axis=1, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_4(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis=0
assert_eq(
pdf.reindex(labels=index, axis=0, copy=True),
gdf.reindex(labels=index, axis=0, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_5(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as columns when axis=1
assert_eq(
pdf.reindex(labels=columns, axis=1, copy=True),
gdf.reindex(labels=columns, axis=1, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_6(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis='index'
assert_eq(
pdf.reindex(labels=index, axis="index", copy=True),
gdf.reindex(labels=index, axis="index", copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_7(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as columns when axis='columns'
assert_eq(
pdf.reindex(labels=columns, axis="columns", copy=True),
gdf.reindex(labels=columns, axis="columns", copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_8(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes labels when index=labels
assert_eq(
pdf.reindex(index=index, copy=True),
gdf.reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_9(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes column names when columns=labels
assert_eq(
pdf.reindex(columns=columns, copy=True),
gdf.reindex(columns=columns, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_10(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes both labels and column names when
# index=index_labels and columns=column_labels
assert_eq(
pdf.reindex(index=index, columns=columns, copy=True),
gdf.reindex(index=index, columns=columns, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_change_dtype(copy):
if PANDAS_GE_110:
kwargs = {"check_freq": False}
else:
kwargs = {}
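# check_freq is only accepted by pandas >= 1.1 comparison utilities,
# hence the conditional kwargs.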
index = pd.date_range("12/29/2009", periods=10, freq="D")
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes both labels and column names when
# index=index_labels and columns=column_labels
assert_eq(
pdf.reindex(index=index, columns=columns, copy=True),
gdf.reindex(index=index, columns=columns, copy=copy),
**kwargs,
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_categorical_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"a": "category"})
pdf = gdf.to_pandas()
assert_eq(pdf["a"].reindex(copy=True), gdf["a"].reindex(copy=copy))
assert_eq(
pdf["a"].reindex(index, copy=True), gdf["a"].reindex(index, copy=copy)
)
assert_eq(
pdf["a"].reindex(index=index, copy=True),
gdf["a"].reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_float_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"c": float})
pdf = gdf.to_pandas()
assert_eq(pdf["c"].reindex(copy=True), gdf["c"].reindex(copy=copy))
assert_eq(
pdf["c"].reindex(index, copy=True), gdf["c"].reindex(index, copy=copy)
)
assert_eq(
pdf["c"].reindex(index=index, copy=True),
gdf["c"].reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_string_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"d": str})
pdf = gdf.to_pandas()
assert_eq(pdf["d"].reindex(copy=True), gdf["d"].reindex(copy=copy))
assert_eq(
pdf["d"].reindex(index, copy=True), gdf["d"].reindex(index, copy=copy)
)
assert_eq(
pdf["d"].reindex(index=index, copy=True),
gdf["d"].reindex(index=index, copy=copy),
)
def test_to_frame(pdf, gdf):
assert_eq(pdf.x.to_frame(), gdf.x.to_frame())
name = "foo"
gdf_new_name = gdf.x.to_frame(name=name)
pdf_new_name = pdf.x.to_frame(name=name)
assert_eq(pdf_new_name, gdf_new_name)
name = False
gdf_new_name = gdf.x.to_frame(name=name)
pdf_new_name = pdf.x.to_frame(name=name)
assert_eq(gdf_new_name, pdf_new_name)
assert gdf_new_name.columns[0] is name
def test_dataframe_empty_sort_index():
pdf = pd.DataFrame({"x": []})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.sort_index()
got = gdf.sort_index()
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [0, 1, "index", "columns"])
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("ignore_index", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_dataframe_sort_index(
axis, ascending, inplace, ignore_index, na_position
):
pdf = pd.DataFrame(
{"b": [1, 3, 2], "a": [1, 4, 3], "c": [4, 1, 5]},
index=[3.0, 1.0, np.nan],
)
gdf = cudf.DataFrame.from_pandas(pdf)
expected = pdf.sort_index(
axis=axis,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
got = gdf.sort_index(
axis=axis,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
if inplace is True:
assert_eq(pdf, gdf)
else:
assert_eq(expected, got)
@pytest.mark.parametrize("axis", [0, 1, "index", "columns"])
@pytest.mark.parametrize(
"level",
[
0,
"b",
1,
["b"],
"a",
["a", "b"],
["b", "a"],
[0, 1],
[1, 0],
[0, 2],
None,
],
)
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("ignore_index", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_dataframe_mulitindex_sort_index(
axis, level, ascending, inplace, ignore_index, na_position
):
pdf = pd.DataFrame(
{
"b": [1.0, 3.0, np.nan],
"a": [1, 4, 3],
1: ["a", "b", "c"],
"e": [3, 1, 4],
"d": [1, 2, 8],
}
).set_index(["b", "a", 1])
gdf = cudf.DataFrame.from_pandas(pdf)
# pandas only added ignore_index to sort_index in v1.0, so it is not passed to pandas here; the reset_index(drop=True) calls below emulate it
expected = pdf.sort_index(
axis=axis,
level=level,
ascending=ascending,
inplace=inplace,
na_position=na_position,
)
got = gdf.sort_index(
axis=axis,
level=level,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
if inplace is True:
if ignore_index is True:
pdf = pdf.reset_index(drop=True)
assert_eq(pdf, gdf)
else:
if ignore_index is True:
expected = expected.reset_index(drop=True)
assert_eq(expected, got)
@pytest.mark.parametrize("dtype", dtypes + ["category"])
def test_dataframe_0_row_dtype(dtype):
if dtype == "category":
data = pd.Series(["a", "b", "c", "d", "e"], dtype="category")
else:
data = np.array([1, 2, 3, 4, 5], dtype=dtype)
expect = cudf.DataFrame()
expect["x"] = data
expect["y"] = data
got = expect.head(0)
for col_name in got.columns:
assert expect[col_name].dtype == got[col_name].dtype
expect = cudf.Series(data)
got = expect.head(0)
assert expect.dtype == got.dtype
@pytest.mark.parametrize("nan_as_null", [True, False])
def test_series_list_nanasnull(nan_as_null):
data = [1.0, 2.0, 3.0, np.nan, None]
expect = pa.array(data, from_pandas=nan_as_null)
got = cudf.Series(data, nan_as_null=nan_as_null).to_arrow()
# Bug in Arrow 0.14.1 where NaNs aren't handled
expect = expect.cast("int64", safe=False)
got = got.cast("int64", safe=False)
assert pa.Array.equals(expect, got)
def test_column_assignment():
gdf = cudf.datasets.randomdata(
nrows=20, dtypes={"a": "category", "b": int, "c": float}
)
new_cols = ["q", "r", "s"]
gdf.columns = new_cols
assert list(gdf.columns) == new_cols
def test_select_dtype():
gdf = cudf.datasets.randomdata(
nrows=20, dtypes={"a": "category", "b": int, "c": float, "d": str}
)
pdf = gdf.to_pandas()
assert_eq(pdf.select_dtypes("float64"), gdf.select_dtypes("float64"))
assert_eq(pdf.select_dtypes(np.float64), gdf.select_dtypes(np.float64))
assert_eq(
pdf.select_dtypes(include=["float64"]),
gdf.select_dtypes(include=["float64"]),
)
assert_eq(
pdf.select_dtypes(include=["object", "int", "category"]),
gdf.select_dtypes(include=["object", "int", "category"]),
)
assert_eq(
pdf.select_dtypes(include=["int64", "float64"]),
gdf.select_dtypes(include=["int64", "float64"]),
)
assert_eq(
pdf.select_dtypes(include=np.number),
gdf.select_dtypes(include=np.number),
)
assert_eq(
pdf.select_dtypes(include=[np.int64, np.float64]),
gdf.select_dtypes(include=[np.int64, np.float64]),
)
assert_eq(
pdf.select_dtypes(include=["category"]),
gdf.select_dtypes(include=["category"]),
)
assert_eq(
pdf.select_dtypes(exclude=np.number),
gdf.select_dtypes(exclude=np.number),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes,
rfunc=gdf.select_dtypes,
lfunc_args_and_kwargs=([], {"includes": ["Foo"]}),
rfunc_args_and_kwargs=([], {"includes": ["Foo"]}),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes,
rfunc=gdf.select_dtypes,
lfunc_args_and_kwargs=(
[],
{"exclude": np.number, "include": np.number},
),
rfunc_args_and_kwargs=(
[],
{"exclude": np.number, "include": np.number},
),
)
gdf = cudf.DataFrame(
{"A": [3, 4, 5], "C": [1, 2, 3], "D": ["a", "b", "c"]}
)
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(include=["object", "int", "category"]),
gdf.select_dtypes(include=["object", "int", "category"]),
)
assert_eq(
pdf.select_dtypes(include=["object"], exclude=["category"]),
gdf.select_dtypes(include=["object"], exclude=["category"]),
)
gdf = cudf.DataFrame({"a": range(10), "b": range(10, 20)})
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(include=["category"]),
gdf.select_dtypes(include=["category"]),
)
assert_eq(
pdf.select_dtypes(include=["float"]),
gdf.select_dtypes(include=["float"]),
)
assert_eq(
pdf.select_dtypes(include=["object"]),
gdf.select_dtypes(include=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"]), gdf.select_dtypes(include=["int"])
)
assert_eq(
pdf.select_dtypes(exclude=["float"]),
gdf.select_dtypes(exclude=["float"]),
)
assert_eq(
pdf.select_dtypes(exclude=["object"]),
gdf.select_dtypes(exclude=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"], exclude=["object"]),
gdf.select_dtypes(include=["int"], exclude=["object"]),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes, rfunc=gdf.select_dtypes,
)
gdf = cudf.DataFrame(
{"a": cudf.Series([], dtype="int"), "b": cudf.Series([], dtype="str")}
)
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(exclude=["object"]),
gdf.select_dtypes(exclude=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"], exclude=["object"]),
gdf.select_dtypes(include=["int"], exclude=["object"]),
)
def test_select_dtype_datetime():
gdf = cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={"x": int}
)
gdf = gdf.reset_index()
pdf = gdf.to_pandas()
assert_eq(pdf.select_dtypes("datetime64"), gdf.select_dtypes("datetime64"))
assert_eq(
pdf.select_dtypes(np.dtype("datetime64")),
gdf.select_dtypes(np.dtype("datetime64")),
)
assert_eq(
pdf.select_dtypes(include="datetime64"),
gdf.select_dtypes(include="datetime64"),
)
def test_select_dtype_datetime_with_frequency():
gdf = cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={"x": int}
)
gdf = gdf.reset_index()
pdf = gdf.to_pandas()
assert_exceptions_equal(
pdf.select_dtypes,
gdf.select_dtypes,
(["datetime64[ms]"],),
(["datetime64[ms]"],),
)
def test_array_ufunc():
gdf = cudf.DataFrame({"x": [2, 3, 4.0], "y": [9.0, 2.5, 1.1]})
pdf = gdf.to_pandas()
assert_eq(np.sqrt(gdf), np.sqrt(pdf))
assert_eq(np.sqrt(gdf.x), np.sqrt(pdf.x))
@pytest.mark.parametrize("nan_value", [-5, -5.0, 0, 5, 5.0, None, "pandas"])
def test_series_to_gpu_array(nan_value):
s = cudf.Series([0, 1, None, 3])
np.testing.assert_array_equal(
s.to_array(nan_value), s.to_gpu_array(nan_value).copy_to_host()
)
def test_dataframe_describe_exclude():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(exclude=["float"])
pdf_results = pdf.describe(exclude=["float"])
assert_eq(gdf_results, pdf_results)
def test_dataframe_describe_include():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(include=["int"])
pdf_results = pdf.describe(include=["int"])
assert_eq(gdf_results, pdf_results)
def test_dataframe_describe_default():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe()
pdf_results = pdf.describe()
assert_eq(pdf_results, gdf_results)
def test_series_describe_include_all():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
df["animal"] = np.random.choice(["dog", "cat", "bird"], data_length)
pdf = df.to_pandas()
gdf_results = df.describe(include="all")
pdf_results = pdf.describe(include="all")
assert_eq(gdf_results[["x", "y"]], pdf_results[["x", "y"]])
assert_eq(gdf_results.index, pdf_results.index)
assert_eq(gdf_results.columns, pdf_results.columns)
assert_eq(
gdf_results[["animal"]].fillna(-1).astype("str"),
pdf_results[["animal"]].fillna(-1).astype("str"),
)
def test_dataframe_describe_percentiles():
np.random.seed(12)
data_length = 10000
sample_percentiles = [0.0, 0.1, 0.33, 0.84, 0.4, 0.99]
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(percentiles=sample_percentiles)
pdf_results = pdf.describe(percentiles=sample_percentiles)
assert_eq(pdf_results, gdf_results)
def test_get_numeric_data():
pdf = pd.DataFrame(
{"x": [1, 2, 3], "y": [1.0, 2.0, 3.0], "z": ["a", "b", "c"]}
)
gdf = cudf.from_pandas(pdf)
assert_eq(pdf._get_numeric_data(), gdf._get_numeric_data())
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("period", [-1, -5, -10, -20, 0, 1, 5, 10, 20])
@pytest.mark.parametrize("data_empty", [False, True])
def test_shift(dtype, period, data_empty):
if data_empty:
data = None
else:
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, 100000, low=-2, high=2)
else:
data = gen_rand(dtype, 100000)
gdf = cudf.DataFrame({"a": cudf.Series(data, dtype=dtype)})
pdf = pd.DataFrame({"a": pd.Series(data, dtype=dtype)})
shifted_outcome = gdf.a.shift(period).fillna(0)
expected_outcome = pdf.a.shift(period).fillna(0).astype(dtype)
if data_empty:
assert_eq(shifted_outcome, expected_outcome, check_index_type=False)
else:
assert_eq(shifted_outcome, expected_outcome)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("period", [-1, -5, -10, -20, 0, 1, 5, 10, 20])
@pytest.mark.parametrize("data_empty", [False, True])
def test_diff(dtype, period, data_empty):
if data_empty:
data = None
else:
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, 100000, low=-2, high=2)
else:
data = gen_rand(dtype, 100000)
gdf = cudf.DataFrame({"a": cudf.Series(data, dtype=dtype)})
pdf = pd.DataFrame({"a": pd.Series(data, dtype=dtype)})
expected_outcome = pdf.a.diff(period)
diffed_outcome = gdf.a.diff(period).astype(expected_outcome.dtype)
if data_empty:
assert_eq(diffed_outcome, expected_outcome, check_index_type=False)
else:
assert_eq(diffed_outcome, expected_outcome)
@pytest.mark.parametrize("df", _dataframe_na_data())
@pytest.mark.parametrize("nan_as_null", [True, False, None])
def test_dataframe_isnull_isna(df, nan_as_null):
gdf = cudf.DataFrame.from_pandas(df, nan_as_null=nan_as_null)
assert_eq(df.isnull(), gdf.isnull())
assert_eq(df.isna(), gdf.isna())
# Test individual columns
for col in df:
assert_eq(df[col].isnull(), gdf[col].isnull())
assert_eq(df[col].isna(), gdf[col].isna())
@pytest.mark.parametrize("df", _dataframe_na_data())
@pytest.mark.parametrize("nan_as_null", [True, False, None])
def test_dataframe_notna_notnull(df, nan_as_null):
gdf = cudf.DataFrame.from_pandas(df, nan_as_null=nan_as_null)
assert_eq(df.notnull(), gdf.notnull())
assert_eq(df.notna(), gdf.notna())
# Test individual columns
for col in df:
assert_eq(df[col].notnull(), gdf[col].notnull())
assert_eq(df[col].notna(), gdf[col].notna())
def test_ndim():
pdf = pd.DataFrame({"x": range(5), "y": range(5, 10)})
gdf = cudf.DataFrame.from_pandas(pdf)
assert pdf.ndim == gdf.ndim
assert pdf.x.ndim == gdf.x.ndim
s = pd.Series(dtype="float64")
gs = cudf.Series()
assert s.ndim == gs.ndim
@pytest.mark.parametrize(
"decimals",
[
-3,
0,
5,
pd.Series([1, 4, 3, -6], index=["w", "x", "y", "z"]),
cudf.Series([-4, -2, 12], index=["x", "y", "z"]),
{"w": -1, "x": 15, "y": 2},
],
)
def test_dataframe_round(decimals):
pdf = pd.DataFrame(
{
"w": np.arange(0.5, 10.5, 1),
"x": np.random.normal(-100, 100, 10),
"y": np.array(
[
14.123,
2.343,
np.nan,
0.0,
-8.302,
np.nan,
94.313,
-112.236,
-8.029,
np.nan,
]
),
"z": np.repeat([-0.6459412758761901], 10),
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
if isinstance(decimals, cudf.Series):
pdecimals = decimals.to_pandas()
else:
pdecimals = decimals
result = gdf.round(decimals)
expected = pdf.round(pdecimals)
assert_eq(result, expected)
# with nulls, maintaining existing null mask
for c in pdf.columns:
arr = pdf[c].to_numpy().astype("float64") # for pandas nulls
arr.ravel()[np.random.choice(10, 5, replace=False)] = np.nan
pdf[c] = gdf[c] = arr
result = gdf.round(decimals)
expected = pdf.round(pdecimals)
assert_eq(result, expected)
for c in gdf.columns:
np.array_equal(gdf[c].nullmask.to_array(), result[c].to_array())
@pytest.mark.parametrize(
"data",
[
[0, 1, 2, 3],
[-2, -1, 2, 3, 5],
[-2, -1, 0, 3, 5],
[True, False, False],
[True],
[False],
[],
[True, None, False],
[True, True, None],
[None, None],
[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]],
[[1, True], [2, False], [3, False]],
pytest.param(
[["a", True], ["b", False], ["c", False]],
marks=[
pytest.mark.xfail(
reason="NotImplementedError: all does not "
"support columns of object dtype."
)
],
),
],
)
def test_all(data):
# Pandas treats `None` in object type columns as True for some reason, so
# replacing with `False`
if np.array(data).ndim <= 1:
pdata = cudf.utils.utils._create_pandas_series(data=data).replace(
[None], False
)
gdata = cudf.Series.from_pandas(pdata)
else:
pdata = pd.DataFrame(data, columns=["a", "b"]).replace([None], False)
gdata = cudf.DataFrame.from_pandas(pdata)
# test bool_only
if pdata["b"].dtype == "bool":
got = gdata.all(bool_only=True)
expected = pdata.all(bool_only=True)
assert_eq(got, expected)
else:
with pytest.raises(NotImplementedError):
gdata.all(bool_only=False)
with pytest.raises(NotImplementedError):
gdata.all(level="a")
got = gdata.all()
expected = pdata.all()
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[0, 1, 2, 3],
[-2, -1, 2, 3, 5],
[-2, -1, 0, 3, 5],
[0, 0, 0, 0, 0],
[0, 0, None, 0],
[True, False, False],
[True],
[False],
[],
[True, None, False],
[True, True, None],
[None, None],
[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]],
[[1, True], [2, False], [3, False]],
pytest.param(
[["a", True], ["b", False], ["c", False]],
marks=[
pytest.mark.xfail(
reason="NotImplementedError: any does not "
"support columns of object dtype."
)
],
),
],
)
@pytest.mark.parametrize("axis", [0, 1])
def test_any(data, axis):
if np.array(data).ndim <= 1:
pdata = cudf.utils.utils._create_pandas_series(data=data)
gdata = cudf.Series.from_pandas(pdata)
if axis == 1:
with pytest.raises(NotImplementedError):
gdata.any(axis=axis)
else:
got = gdata.any(axis=axis)
expected = pdata.any(axis=axis)
assert_eq(got, expected)
else:
pdata = pd.DataFrame(data, columns=["a", "b"])
gdata = cudf.DataFrame.from_pandas(pdata)
# test bool_only
if pdata["b"].dtype == "bool":
got = gdata.any(bool_only=True)
expected = pdata.any(bool_only=True)
assert_eq(got, expected)
else:
with pytest.raises(NotImplementedError):
gdata.any(bool_only=False)
with pytest.raises(NotImplementedError):
gdata.any(level="a")
got = gdata.any(axis=axis)
expected = pdata.any(axis=axis)
assert_eq(got, expected)
@pytest.mark.parametrize("axis", [0, 1])
def test_empty_dataframe_any(axis):
pdf = pd.DataFrame({}, columns=["a", "b"])
gdf = cudf.DataFrame.from_pandas(pdf)
got = gdf.any(axis=axis)
expected = pdf.any(axis=axis)
assert_eq(got, expected, check_index_type=False)
@pytest.mark.parametrize("indexed", [False, True])
def test_dataframe_sizeof(indexed):
rows = int(1e6)
index = list(range(rows)) if indexed else None
gdf = cudf.DataFrame({"A": [8] * rows, "B": [32] * rows}, index=index)
for c in gdf._data.columns:
assert gdf._index.__sizeof__() == gdf._index.__sizeof__()
cols_sizeof = sum(c.__sizeof__() for c in gdf._data.columns)
assert gdf.__sizeof__() == (gdf._index.__sizeof__() + cols_sizeof)
@pytest.mark.parametrize("a", [[], ["123"]])
@pytest.mark.parametrize("b", ["123", ["123"]])
@pytest.mark.parametrize(
"misc_data",
["123", ["123"] * 20, 123, [1, 2, 0.8, 0.9] * 50, 0.9, 0.00001],
)
@pytest.mark.parametrize("non_list_data", [123, "abc", "zyx", "rapids", 0.8])
def test_create_dataframe_cols_empty_data(a, b, misc_data, non_list_data):
expected = pd.DataFrame({"a": a})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = b
actual["b"] = b
assert_eq(actual, expected)
expected = pd.DataFrame({"a": []})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = misc_data
actual["b"] = misc_data
assert_eq(actual, expected)
expected = pd.DataFrame({"a": a})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = non_list_data
actual["b"] = non_list_data
assert_eq(actual, expected)
def test_empty_dataframe_describe():
pdf = pd.DataFrame({"a": [], "b": []})
gdf = cudf.from_pandas(pdf)
expected = pdf.describe()
actual = gdf.describe()
assert_eq(expected, actual)
def test_as_column_types():
col = column.as_column(cudf.Series([]))
assert_eq(col.dtype, np.dtype("float64"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="float64"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="float32")
assert_eq(col.dtype, np.dtype("float32"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="float32"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="str")
assert_eq(col.dtype, np.dtype("object"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="str"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="object")
assert_eq(col.dtype, np.dtype("object"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="object"))
assert_eq(pds, gds)
pds = pd.Series(np.array([1, 2, 3]), dtype="float32")
gds = cudf.Series(column.as_column(np.array([1, 2, 3]), dtype="float32"))
assert_eq(pds, gds)
pds = pd.Series([1, 2, 3], dtype="float32")
gds = cudf.Series([1, 2, 3], dtype="float32")
assert_eq(pds, gds)
pds = pd.Series([], dtype="float64")
gds = cudf.Series(column.as_column(pds))
assert_eq(pds, gds)
pds = pd.Series([1, 2, 4], dtype="int64")
gds = cudf.Series(column.as_column(cudf.Series([1, 2, 4]), dtype="int64"))
assert_eq(pds, gds)
pds = pd.Series([1.2, 18.0, 9.0], dtype="float32")
gds = cudf.Series(
column.as_column(cudf.Series([1.2, 18.0, 9.0]), dtype="float32")
)
assert_eq(pds, gds)
pds = pd.Series([1.2, 18.0, 9.0], dtype="str")
gds = cudf.Series(
column.as_column(cudf.Series([1.2, 18.0, 9.0]), dtype="str")
)
assert_eq(pds, gds)
pds = pd.Series(pd.Index(["1", "18", "9"]), dtype="int")
gds = cudf.Series(
cudf.core.index.StringIndex(["1", "18", "9"]), dtype="int"
)
assert_eq(pds, gds)
def test_one_row_head():
gdf = cudf.DataFrame({"name": ["carl"], "score": [100]}, index=[123])
pdf = gdf.to_pandas()
head_gdf = gdf.head()
head_pdf = pdf.head()
assert_eq(head_pdf, head_gdf)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", NUMERIC_TYPES)
def test_series_astype_numeric_to_numeric(dtype, as_dtype):
psr = pd.Series([1, 2, 4, 3], dtype=dtype)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", NUMERIC_TYPES)
def test_series_astype_numeric_to_numeric_nulls(dtype, as_dtype):
data = [1, 2, None, 3]
sr = cudf.Series(data, dtype=dtype)
got = sr.astype(as_dtype)
expect = cudf.Series([1, 2, None, 3], dtype=as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize(
"as_dtype",
[
"str",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_numeric_to_other(dtype, as_dtype):
psr = pd.Series([1, 2, 3], dtype=dtype)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"as_dtype",
[
"str",
"int32",
"uint32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_string_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05"]
else:
data = ["1", "2", "3"]
psr = pd.Series(data)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"as_dtype",
[
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_datetime_to_other(as_dtype):
data = ["2001-01-01", "2002-02-02", "2001-01-05"]
psr = pd.Series(data)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"inp",
[
("datetime64[ns]", "2011-01-01 00:00:00.000000000"),
("datetime64[us]", "2011-01-01 00:00:00.000000"),
("datetime64[ms]", "2011-01-01 00:00:00.000"),
("datetime64[s]", "2011-01-01 00:00:00"),
],
)
def test_series_astype_datetime_to_string(inp):
dtype, expect = inp
base_date = "2011-01-01"
sr = cudf.Series([base_date], dtype=dtype)
got = sr.astype(str)[0]
assert expect == got
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"uint32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
"str",
],
)
def test_series_astype_categorical_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05", "2001-01-01"]
else:
data = [1, 2, 3, 1]
psr = pd.Series(data, dtype="category")
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize("ordered", [True, False])
def test_series_astype_to_categorical_ordered(ordered):
psr = pd.Series([1, 2, 3, 1], dtype="category")
gsr = cudf.from_pandas(psr)
ordered_dtype_pd = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=ordered
)
ordered_dtype_gd = cudf.CategoricalDtype.from_pandas(ordered_dtype_pd)
assert_eq(
psr.astype("int32").astype(ordered_dtype_pd).astype("int32"),
gsr.astype("int32").astype(ordered_dtype_gd).astype("int32"),
)
@pytest.mark.parametrize("ordered", [True, False])
def test_series_astype_cat_ordered_to_unordered(ordered):
pd_dtype = pd.CategoricalDtype(categories=[1, 2, 3], ordered=ordered)
pd_to_dtype = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=not ordered
)
gd_dtype = cudf.CategoricalDtype.from_pandas(pd_dtype)
gd_to_dtype = cudf.CategoricalDtype.from_pandas(pd_to_dtype)
psr = pd.Series([1, 2, 3], dtype=pd_dtype)
gsr = cudf.Series([1, 2, 3], dtype=gd_dtype)
expect = psr.astype(pd_to_dtype)
got = gsr.astype(gd_to_dtype)
assert_eq(expect, got)
def test_series_astype_null_cases():
data = [1, 2, None, 3]
# numerical to other
assert_eq(cudf.Series(data, dtype="str"), cudf.Series(data).astype("str"))
assert_eq(
cudf.Series(data, dtype="category"),
cudf.Series(data).astype("category"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="int32").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="uint32").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="datetime64[ms]"),
cudf.Series(data).astype("datetime64[ms]"),
)
# categorical to other
assert_eq(
cudf.Series(data, dtype="str"),
cudf.Series(data, dtype="category").astype("str"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="category").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="datetime64[ms]"),
cudf.Series(data, dtype="category").astype("datetime64[ms]"),
)
# string to other
assert_eq(
cudf.Series([1, 2, None, 3], dtype="int32"),
cudf.Series(["1", "2", None, "3"]).astype("int32"),
)
assert_eq(
cudf.Series(
["2001-01-01", "2001-02-01", None, "2001-03-01"],
dtype="datetime64[ms]",
),
cudf.Series(["2001-01-01", "2001-02-01", None, "2001-03-01"]).astype(
"datetime64[ms]"
),
)
assert_eq(
cudf.Series(["a", "b", "c", None], dtype="category").to_pandas(),
cudf.Series(["a", "b", "c", None]).astype("category").to_pandas(),
)
# datetime to other
data = [
"2001-01-01 00:00:00.000000",
"2001-02-01 00:00:00.000000",
None,
"2001-03-01 00:00:00.000000",
]
assert_eq(
cudf.Series(data),
cudf.Series(data, dtype="datetime64[us]").astype("str"),
)
assert_eq(
pd.Series(data, dtype="datetime64[ns]").astype("category"),
cudf.from_pandas(pd.Series(data, dtype="datetime64[ns]")).astype(
"category"
),
)
def test_series_astype_null_categorical():
sr = cudf.Series([None, None, None], dtype="category")
expect = cudf.Series([None, None, None], dtype="int32")
got = sr.astype("int32")
assert_eq(expect, got)
@pytest.mark.parametrize(
"data",
[
(
pd.Series([3, 3.0]),
pd.Series([2.3, 3.9]),
pd.Series([1.5, 3.9]),
pd.Series([1.0, 2]),
),
[
pd.Series([3, 3.0]),
pd.Series([2.3, 3.9]),
pd.Series([1.5, 3.9]),
pd.Series([1.0, 2]),
],
],
)
def test_create_dataframe_from_list_like(data):
pdf = pd.DataFrame(data, index=["count", "mean", "std", "min"])
gdf = cudf.DataFrame(data, index=["count", "mean", "std", "min"])
assert_eq(pdf, gdf)
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def test_create_dataframe_column():
pdf = pd.DataFrame(columns=["a", "b", "c"], index=["A", "Z", "X"])
gdf = cudf.DataFrame(columns=["a", "b", "c"], index=["A", "Z", "X"])
assert_eq(pdf, gdf)
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": [2, 3, 5]},
columns=["a", "b", "c"],
index=["A", "Z", "X"],
)
gdf = cudf.DataFrame(
{"a": [1, 2, 3], "b": [2, 3, 5]},
columns=["a", "b", "c"],
index=["A", "Z", "X"],
)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"data",
[
[1, 2, 4],
[],
[5.0, 7.0, 8.0],
pd.Categorical(["a", "b", "c"]),
["m", "a", "d", "v"],
],
)
def test_series_values_host_property(data):
pds = cudf.utils.utils._create_pandas_series(data=data)
gds = cudf.Series(data)
np.testing.assert_array_equal(pds.values, gds.values_host)
@pytest.mark.parametrize(
"data",
[
[1, 2, 4],
[],
[5.0, 7.0, 8.0],
pytest.param(
pd.Categorical(["a", "b", "c"]),
marks=pytest.mark.xfail(raises=NotImplementedError),
),
pytest.param(
["m", "a", "d", "v"],
marks=pytest.mark.xfail(raises=NotImplementedError),
),
],
)
def test_series_values_property(data):
pds = cudf.utils.utils._create_pandas_series(data=data)
gds = cudf.Series(data)
gds_vals = gds.values
assert isinstance(gds_vals, cupy.ndarray)
np.testing.assert_array_equal(gds_vals.get(), pds.values)
@pytest.mark.parametrize(
"data",
[
{"A": [1, 2, 3], "B": [4, 5, 6]},
{"A": [1.0, 2.0, 3.0], "B": [4.0, 5.0, 6.0]},
{"A": [1, 2, 3], "B": [1.0, 2.0, 3.0]},
{"A": np.float32(np.arange(3)), "B": np.float64(np.arange(3))},
pytest.param(
{"A": [1, None, 3], "B": [1, 2, None]},
marks=pytest.mark.xfail(
reason="Nulls not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": [None, None, None], "B": [None, None, None]},
marks=pytest.mark.xfail(
reason="Nulls not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": [], "B": []},
marks=pytest.mark.xfail(reason="Requires at least 1 row"),
),
pytest.param(
{"A": [1, 2, 3], "B": ["a", "b", "c"]},
marks=pytest.mark.xfail(
reason="str or categorical not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": pd.Categorical(["a", "b", "c"]), "B": ["d", "e", "f"]},
marks=pytest.mark.xfail(
reason="str or categorical not supported by as_gpu_matrix"
),
),
],
)
def test_df_values_property(data):
pdf = pd.DataFrame.from_dict(data)
gdf = cudf.DataFrame.from_pandas(pdf)
pmtr = pdf.values
gmtr = gdf.values.get()
np.testing.assert_array_equal(pmtr, gmtr)
def test_value_counts():
pdf = pd.DataFrame(
{
"numeric": [1, 2, 3, 4, 5, 6, 1, 2, 4] * 10,
"alpha": ["u", "h", "d", "a", "m", "u", "h", "d", "a"] * 10,
}
)
gdf = cudf.DataFrame(
{
"numeric": [1, 2, 3, 4, 5, 6, 1, 2, 4] * 10,
"alpha": ["u", "h", "d", "a", "m", "u", "h", "d", "a"] * 10,
}
)
assert_eq(
pdf.numeric.value_counts().sort_index(),
gdf.numeric.value_counts().sort_index(),
check_dtype=False,
)
assert_eq(
pdf.alpha.value_counts().sort_index(),
gdf.alpha.value_counts().sort_index(),
check_dtype=False,
)
@pytest.mark.parametrize(
"data",
[
[],
[0, 12, 14],
[0, 14, 12, 12, 3, 10, 12, 14],
np.random.randint(-100, 100, 200),
pd.Series([0.0, 1.0, None, 10.0]),
[None, None, None, None],
[np.nan, None, -1, 2, 3],
],
)
@pytest.mark.parametrize(
"values",
[
np.random.randint(-100, 100, 10),
[],
[np.nan, None, -1, 2, 3],
[1.0, 12.0, None, None, 120],
[0, 14, 12, 12, 3, 10, 12, 14, None],
[None, None, None],
["0", "12", "14"],
["0", "12", "14", "a"],
],
)
def test_isin_numeric(data, values):
index = np.random.randint(0, 100, len(data))
psr = cudf.utils.utils._create_pandas_series(data=data, index=index)
gsr = cudf.Series.from_pandas(psr, nan_as_null=False)
expected = psr.isin(values)
got = gsr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(
["2018-01-01", "2019-04-03", None, "2019-12-30"],
dtype="datetime64[ns]",
),
pd.Series(
[
"2018-01-01",
"2019-04-03",
None,
"2019-12-30",
"2018-01-01",
"2018-01-01",
],
dtype="datetime64[ns]",
),
],
)
@pytest.mark.parametrize(
"values",
[
[],
[1514764800000000000, 1577664000000000000],
[
1514764800000000000,
1577664000000000000,
1577664000000000000,
1577664000000000000,
1514764800000000000,
],
["2019-04-03", "2019-12-30", "2012-01-01"],
[
"2012-01-01",
"2012-01-01",
"2012-01-01",
"2019-04-03",
"2019-12-30",
"2012-01-01",
],
],
)
def test_isin_datetime(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(["this", "is", None, "a", "test"]),
pd.Series(["test", "this", "test", "is", None, "test", "a", "test"]),
pd.Series(["0", "12", "14"]),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["this", "is"],
[None, None, None],
["12", "14", "19"],
pytest.param(
[12, 14, 19],
marks=pytest.mark.xfail(
not PANDAS_GE_120,
reason="pandas's failure here seems like a bug(in < 1.2) "
"given the reverse succeeds",
),
),
["is", "this", "is", "this", "is"],
],
)
def test_isin_string(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(["a", "b", "c", "c", "c", "d", "e"], dtype="category"),
pd.Series(["a", "b", None, "c", "d", "e"], dtype="category"),
pd.Series([0, 3, 10, 12], dtype="category"),
pd.Series([0, 3, 10, 12, 0, 10, 3, 0, 0, 3, 3], dtype="category"),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["a", "b", None, "f", "words"],
["0", "12", None, "14"],
[0, 10, 12, None, 39, 40, 1000],
[0, 0, 0, 0, 3, 3, 3, None, 1, 2, 3],
],
)
def test_isin_categorical(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(
["this", "is", None, "a", "test"], index=["a", "b", "c", "d", "e"]
),
pd.Series([0, 15, 10], index=[0, None, 9]),
pd.Series(
range(25),
index=pd.date_range(
start="2019-01-01", end="2019-01-02", freq="H"
),
),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["this", "is"],
[0, 19, 13],
["2019-01-01 04:00:00", "2019-01-01 06:00:00", "2018-03-02"],
],
)
def test_isin_index(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.index.isin(values)
expected = psr.index.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
pd.MultiIndex.from_arrays(
[[1, 2, 3], ["red", "blue", "green"]], names=("number", "color")
),
pd.MultiIndex.from_arrays([[], []], names=("number", "color")),
pd.MultiIndex.from_arrays(
[[1, 2, 3, 10, 100], ["red", "blue", "green", "pink", "white"]],
names=("number", "color"),
),
],
)
@pytest.mark.parametrize(
"values,level,err",
[
(["red", "orange", "yellow"], "color", None),
(["red", "white", "yellow"], "color", None),
([0, 1, 2, 10, 11, 15], "number", None),
([0, 1, 2, 10, 11, 15], None, TypeError),
(pd.Series([0, 1, 2, 10, 11, 15]), None, TypeError),
(pd.Index([0, 1, 2, 10, 11, 15]), None, TypeError),
( | pd.Index([0, 1, 2, 8, 11, 15]) | pandas.Index |
from sklearn.preprocessing import LabelEncoder
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from tensorflow.keras.models import Sequential
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from tensorflow.keras.layers import Dropout, Dense
import pandas as pd
import numpy as np
import pickle
from imblearn.over_sampling import SMOTE
import time
start = time.time()
df = pd.read_csv('bsa_training_data.csv')
print(f'df.shape: {df.shape}')
# Drop rows with missing labels first so X and y stay aligned
df = df.dropna(subset=['Label'])
X = df.PARTICULARS
y = df.Label.astype(str)
print(f'Total Unique Labels in Train: {len(df.Label.unique())}')
le = LabelEncoder()
y = le.fit_transform(y).astype(str)
le_name = r'Saved Models/target_label_encoder.sav'
pickle.dump(le, open(le_name, 'wb'))
print(f'type(X): {type(X)}, X.shape: {X.shape}')
cv = TfidfVectorizer(analyzer='word')
Xcv = cv.fit_transform(X).toarray()
cv_name = 'vectorizer.sav'
pickle.dump(cv, open(cv_name, 'wb'))
print(f'Xcv.shape: {Xcv.shape}, type(Xcv): {type(Xcv)}')
# Oversample the minority classes with SMOTE (imported above) before training
sm = SMOTE(random_state=42, k_neighbors=1)
Xtrain, ytrain = sm.fit_resample(Xcv, y.ravel())
Xtrain = | pd.DataFrame(Xtrain) | pandas.DataFrame |
from functools import partialmethod
import pandas as pd
from sqlalchemy.orm import sessionmaker
from sqlalchemy import create_engine
import sqlite3
import click
import json
import pkg_resources
from itertools import combinations
from q2_mlab.db.schema import RegressionScore
from q2_mlab.plotting.components import (
Mediator,
ComponentMixin,
Plottable,
ButtonComponent,
ScatterComponent,
SegmentComponent,
DataSourceComponent,
SelectComponent,
)
from bokeh.plotting import figure
from bokeh.transform import factor_cmap
from bokeh.models import (
ColumnDataSource,
CheckboxButtonGroup,
TextInput,
Legend,
LegendItem,
)
from bokeh.models.widgets import (
Div,
)
from bokeh.palettes import (
Category20,
Set3,
)
from bokeh.layouts import column, row
from bokeh.server.server import Server
groups = ['parameters_id', 'dataset', 'target', 'level', 'algorithm']
drop_cols = ['artifact_uuid', 'datetime', 'CV_IDX', 'id']
target_map = {
'age_v2': 'age',
'BL_AGE': 'age',
'age': 'age',
'bmi_v2': 'bmi',
'BMI': 'bmi',
'bmi': 'bmi'
}
with pkg_resources.resource_stream(
__name__, "standard_deviations.json"
) as f:
TARGET_SD = json.load(f)
def _get_standardized_mae(df_row, norm_dict):
"""
"""
mae = df_row['MAE']
target = df_row['target']
dataset = df_row['dataset']
cv_fold = df_row['CV_IDX']
level = df_row['level']
key = f"({dataset}, {target}, {level}, {cv_fold})"
sd = norm_dict.get(key, 1)
standardized_mae = mae / sd
return standardized_mae
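# Note: norm_dict keys are expected to follow the f-string pattern used above,
# e.g. "(finrisk, age, 16S, 0)" for (dataset, target, level, CV fold); entries
# missing from the dict leave the MAE unscaled (standard deviation of 1).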
def process_db_df(df):
# remap values for consistency
df['level'] = df['level'].replace('none', 'MG')
df['target'] = df['target'].map(target_map)
df['standardized_MAE'] = df.apply(_get_standardized_mae, axis=1,
args=(TARGET_SD,))
group_stats = df.drop(
drop_cols, axis=1
).groupby(
groups
).agg(
['var', 'mean']
)
group_stats.columns = agg_columns = ['_'.join(col).strip() for
col in group_stats.columns.values]
group_stats.reset_index(inplace=True)
min_by = ['dataset', 'target']
group_mins = group_stats[agg_columns + min_by].groupby(min_by).min()
indices = group_stats[['dataset', 'target']].to_records(
index=False).tolist()
expanded_group_mins = group_mins.loc[indices]
expanded_group_mins.index = group_stats.index
relative_group_stats = (group_stats / expanded_group_mins)[agg_columns]
relative_group_stats.columns = ['relative_' + col for
col in relative_group_stats]
group_stats = group_stats.join(relative_group_stats)
return group_stats
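# Illustrative usage (mirrors how AlgorithmScatter.plot() loads the table below;
# `engine` is whatever SQLAlchemy engine the scores were written to):
#   raw = pd.read_sql_table(RegressionScore.__tablename__, con=engine)
#   stats = process_db_df(raw)
#   stats[['algorithm', 'MAE_mean', 'relative_MAE_mean']].head()
# Each row is one (parameters_id, dataset, target, level, algorithm) group with
# per-group mean/variance columns plus the same values relative to the best
# (minimum) group for that dataset/target.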
def find_segments(group_stats, across, groupby):
"""
TODO makes some assumptions about the guarantees on pairs when there are
more than 2 categories
"""
seg_cols = groupby.copy()
seg_cols.remove(across)
group_counts = group_stats[seg_cols + [across]].groupby(seg_cols).count()
max_n_pairs = group_counts[across].max()
category_values = group_stats[across].unique()
where = (group_counts[across] == max_n_pairs)
keep_repeats = group_stats.set_index(seg_cols).loc[where]
keep_repeats_parts = []
for i, sub_group in enumerate(category_values):
where = keep_repeats[across] == sub_group
keep_repeats_parts.append(keep_repeats.loc[where])
keep_repeats_parts[i].columns = [col + '_' + sub_group for
col in keep_repeats_parts[i].columns]
segment_df = pd.concat(keep_repeats_parts,
axis=1
)
return segment_df
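# find_segments keeps only the groups that have every value of `across` present
# and returns each aggregate column repeated under a suffixed name, e.g. for
# across='level' the columns 'MAE_mean_16S' and 'MAE_mean_MG' hold the paired
# endpoints that the segment glyph draws a line between.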
class TextInputComponent(ComponentMixin):
def __init__(self, text_input_kwargs):
super().__init__()
self.text_input = TextInput(**text_input_kwargs)
self.layout = self.text_input
self.input_text_callback = None
def set_mediator(self, mediator):
super().set_mediator(mediator)
event_name = 'text-change'
text_change = self.make_attr_old_new_callback(event_name)
self.input_text_callback = text_change
self.text_input.on_change('value', self.input_text_callback)
class CheckboxButtonGroupComponent(ComponentMixin):
def __init__(self, checkbox_kwargs):
super().__init__()
self.checkbox = CheckboxButtonGroup(**checkbox_kwargs)
self.checkbox_change = None
self.layout = self.checkbox
def set_mediator(self, mediator):
super().set_mediator(mediator)
event_name = 'checkbox-change'
self.checkbox_change = self.make_attr_old_new_callback(event_name)
self.checkbox.on_change('active', self.checkbox_change)
class SegmentComponentExt(SegmentComponent):
def redraw(self, x, y, seg_0, seg_1, data):
self.data_source.data = data
self.segment.glyph.x0 = '_'.join([x, seg_0])
self.segment.glyph.x1 = '_'.join([x, seg_1])
self.segment.glyph.y0 = '_'.join([y, seg_0])
self.segment.glyph.y1 = '_'.join([y, seg_1])
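# redraw() swaps both the data source and the glyph coordinate fields: the field
# names are rebuilt as '<metric>_<category value>' (e.g. 'MAE_mean_16S'), which
# matches the column naming convention produced by find_segments() above.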
palettes = {
'Category20': Category20,
'Set3': Set3,
}
DEFAULTS = {
'segment_variable': 'dataset',
'x': 'MAE_mean',
'y': 'MAE_var',
'x_axis_type': 'log',
'y_axis_type': 'log',
'cmap': 'Category20'
}
class AlgorithmScatter(Mediator, Plottable):
def __init__(self, x, y, engine, cmap=None):
super().__init__()
self.x = x
self.y = y
self.engine = engine
self.data = None
self.scatter = None
if cmap is None:
self.cmap = Category20
else:
self.cmap = cmap
self.line_segment_variable = DEFAULTS['segment_variable']
self.data_raw = None
self.data_static = None
self.data = None
self.seg_0, self.seg_1 = None, None
self.scatter_source = None
self.x_axis_type = DEFAULTS['x_axis_type']
self.y_axis_type = DEFAULTS['y_axis_type']
self.axis_types = ['linear', 'log']
self.line_segment_pairs = {
'dataset': ['finrisk', 'sol'],
'level': ['16S', 'MG'],
'target': ['age', 'bmi'],
}
self.scatter_tools = 'pan,wheel_zoom,box_select,lasso_select,'\
'reset,box_zoom,save'
self.segment = None
self.segment_source = None
self.segment_button = None
self.segment_variable_select = None
self.x_var_select = None
self.y_var_select = None
self.dataset_bars = None
self.dataset_bars_source = None
self.dataset_bars_figure = None
self.level_bars = None
self.level_bars_source = None
self.level_bars_figure = None
self.target_bars = None
self.target_bars_source = None
self.target_bars_figure = None
self.query_button = None
self.query_input = None
self.query_row = None
self.layout = None
def notify(self,
component,
event_name,
*args, **kwargs,
):
if (event_name == 'dropdown-select') and \
(component is self.x_var_select):
self.x = component.select.value
self.scatter.scatter.glyph.x = self.x
self.scatter.layout.xaxis.axis_label = self.x
self.segment.segment.glyph.x0 = '_'.join([self.x, self.seg_0])
self.segment.segment.glyph.x1 = '_'.join([self.x, self.seg_1])
if (event_name == 'dropdown-select') and \
(component is self.y_var_select):
self.y = component.select.value
self.scatter.scatter.glyph.y = self.y
self.scatter.layout.yaxis.axis_label = self.y
self.segment.segment.glyph.y0 = '_'.join([self.y, self.seg_0])
self.segment.segment.glyph.y1 = '_'.join([self.y, self.seg_1])
if (event_name == 'selected-indices') and \
(component is self.scatter_source):
selected_indices = self.scatter_source.data_source.selected.indices
self.dataset_bars_source.data = self.get_dataset_counts(
indices=selected_indices,
)
self.level_bars_source.data = self.get_level_counts(
indices=selected_indices,
)
self.target_bars_source.data = self.get_target_counts(
indices=selected_indices,
)
if (event_name == 'button-click') and \
(component is self.query_button):
df = self.handle_query(self.query_input.text_input.value)
# need to update self.data due to how the hbars are currently
# written
self.data = df
self.scatter_source.data_source.data = df.to_dict(orient='list')
segment_source = find_segments(
df,
across=self.line_segment_variable,
groupby=['parameters_id', 'algorithm', 'level', 'dataset',
'target'],
)
self.segment.segment.data_source.data = segment_source.to_dict(
orient='list',
)
selected_indices = self.scatter_source.data_source.selected.indices
self.dataset_bars_source.data = self.get_dataset_counts(
indices=selected_indices,
)
self.level_bars_source.data = self.get_level_counts(
indices=selected_indices,
)
self.target_bars_source.data = self.get_target_counts(
indices=selected_indices,
)
if (event_name == 'checkbox-change') and \
(component is self.segment_button):
active = self.segment_button.checkbox.active
if 0 in active:
self.segment.segment.visible = True
else:
self.segment.segment.visible = False
if (event_name == 'dropdown-select') and \
(component is self.segment_variable_select):
new_segment_variable = self.segment_variable_select.select.value
self.line_segment_variable = new_segment_variable
new_segment_data = find_segments(
self.data,
across=self.line_segment_variable,
groupby=['parameters_id', 'algorithm', 'level', 'dataset',
'target']
)
line_segment_ends = self.line_segment_pairs[new_segment_variable]
self.segment.redraw(
self.x,
self.y,
*line_segment_ends,
new_segment_data
)
def plot(self):
self.data_raw = pd.read_sql_table(RegressionScore.__tablename__,
con=self.engine,
)
# TODO this is temporary
self.data_raw = self.data_raw.loc[
self.data_raw['algorithm'] != 'MLPRegressor'
]
self.data = df = process_db_df(self.data_raw)
self.data_static = df
self.seg_0, self.seg_1 = self.line_segment_pairs[
self.line_segment_variable
]
# ## Data Setup
scatter_source = ColumnDataSource(df)
self.scatter_source = DataSourceComponent(scatter_source)
self.scatter_source.set_mediator(self)
# ## General Setup
algorithms = sorted(df['algorithm'].unique())
levels = sorted(df['level'].unique())
datasets = sorted(df['dataset'].unique())
targets = sorted(df['target'].unique())
plot_width = 600
self.line_segment_pairs = {
'level': ['16S', 'MG'],
'target': ['age', 'bmi'],
}
dataset_combinations = combinations(["finrisk", "imsms", "sol"], r=2)
for dataset_pair in dataset_combinations:
d1, d2 = dataset_pair
self.line_segment_pairs[f"{d1}-to-{d2}"] = [d1, d2]
categorical_variables = ['parameters_id', 'target', 'algorithm',
'level', 'dataset']
plottable_variables = list(sorted(
df.columns.drop(categorical_variables)
))
color_scheme = self.cmap[len(algorithms)]
algorithm_cmap = factor_cmap('algorithm', palette=color_scheme,
factors=algorithms,
)
figure_kwargs = dict(x_axis_type=self.x_axis_type,
y_axis_type=self.y_axis_type,
plot_height=400,
tools=self.scatter_tools,
output_backend='webgl',
)
# ## Segment Plot
segment_source = ColumnDataSource(
find_segments(self.data, across=self.line_segment_variable,
groupby=['parameters_id', 'algorithm', 'level',
'dataset']
)
)
self.segment_source = DataSourceComponent(scatter_source)
self.segment = SegmentComponentExt(data_source=segment_source)
segment_kwargs = {
'x0': self.x + '_' + self.seg_0,
'x1': self.x + '_' + self.seg_1,
'y0': self.y + '_' + self.seg_0,
'y1': self.y + '_' + self.seg_1,
'line_width': 0.1,
'line_color': '#A9A9A9',
}
self.segment.plot(
figure_kwargs=figure_kwargs,
segment_kwargs=segment_kwargs,
)
# ## Segment Visible button
self.segment_button = CheckboxButtonGroupComponent(
checkbox_kwargs=dict(
labels=['Segments'],
active=[0],
)
)
self.segment_button.set_mediator(self)
self.segment_variable_select = SelectComponent(
select_kwargs=dict(
value=self.line_segment_variable,
title='Segment Variable',
options=list(self.line_segment_pairs.keys()),
)
)
self.segment_variable_select.set_mediator(self)
# ## Scatter plot
self.scatter = ScatterComponent()
scatter_kwargs = dict(x=self.x, y=self.y, source=scatter_source,
# legend_field='algorithm',
fill_color=algorithm_cmap,
name='scatter',
)
self.scatter.plot(
figure=self.segment.layout,
scatter_kwargs=scatter_kwargs,
)
scatter = self.scatter.layout
scatter.toolbar.logo = None
scatter.xaxis.axis_label = self.x
scatter.yaxis.axis_label = self.y
self.scatter.scatter.glyph.line_color = 'white'
self.scatter.scatter.glyph.line_width = 0.1
self.scatter.scatter.nonselection_glyph.line_color = 'white'
transform = algorithm_cmap['transform']
legend_fig = figure(outline_line_alpha=0, toolbar_location=None)
legend_items = []
for i, (alg, color) in enumerate(zip(transform.factors,
transform.palette)):
legend_fig.circle(fill_color=color, name=f'circ{i}',
line_color='white',
)
renderers = legend_fig.select(name=f'circ{i}')
legend_item = LegendItem(
label=alg,
renderers=renderers,
)
legend_items.append(legend_item)
legend = Legend(
items=legend_items,
location='top_left',
)
legend_fig.add_layout(legend)
scatter.plot_width = plot_width
scatter.plot_height = 500
# ## Variable Selection
self.x_var_select = SelectComponent(
select_kwargs=dict(
value=self.x,
title='X variable',
options=plottable_variables
)
)
self.x_var_select.set_mediator(self)
x_select = self.x_var_select.select
self.y_var_select = SelectComponent(
select_kwargs=dict(
value=self.y,
title='Y variable',
options=plottable_variables
)
)
self.y_var_select.set_mediator(self)
y_select = self.y_var_select.select
# ## Dataset Stacked Hbars
data_getter = self.get_dataset_counts
self.dataset_bars_source = ColumnDataSource(data_getter())
self.dataset_bars_figure = figure(y_range=datasets, plot_height=100)
self.dataset_bars = self.dataset_bars_figure.hbar_stack(
algorithms, y='dataset',
height=0.9,
color=color_scheme,
source=self.dataset_bars_source,
)
self.dataset_bars_figure.toolbar_location = None
self.dataset_bars_figure.plot_width = plot_width
# ## Level Stacked Hbars
data_getter = self.get_level_counts
self.level_bars_source = ColumnDataSource(data_getter())
self.level_bars_figure = figure(y_range=levels, plot_height=100)
self.level_bars = self.level_bars_figure.hbar_stack(
algorithms, y='level',
height=0.9,
color=color_scheme,
source=self.level_bars_source,
)
self.level_bars_figure.toolbar_location = None
self.level_bars_figure.plot_width = plot_width
# ## Target Stacked Hbars
data_getter = self.get_target_counts
self.target_bars_source = ColumnDataSource(data_getter())
self.target_bars_figure = figure(y_range=targets, plot_height=100)
self.target_bars = self.target_bars_figure.hbar_stack(
algorithms, y='target',
height=0.9,
color=color_scheme,
source=self.target_bars_source,
)
self.target_bars_figure.toolbar_location = None
self.target_bars_figure.plot_width = plot_width
# ## Text input
button_width = 100
self.query_input = TextInputComponent(
text_input_kwargs=dict(
title='Enter query',
width=plot_width - button_width
)
)
self.query_button = ButtonComponent(
button_kwargs=dict(
label='Execute',
width=button_width,
)
)
self.query_button.set_mediator(self)
self.query_row = row(self.query_input.layout,
column(
Div(text="", height=8),
self.query_button.layout,
))
# ## Layout
variable_selection = row(x_select, y_select,
)
segment_selection = row(
self.segment_variable_select.layout,
column(
Div(text="", height=8),
self.segment_button.layout,
)
)
self.layout = row(
column(
self.query_row,
variable_selection,
segment_selection,
row(
scatter,
column(
self.dataset_bars_figure,
self.level_bars_figure,
self.target_bars_figure,
legend_fig,
),
),
),
)
return self
def handle_query(self, text):
if text != '':
df = self.data_static.query(text).reset_index(drop=True)
else:
df = self.data_static
return df
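# `text` is passed straight to pandas.DataFrame.query, so any expression over the
# summary columns works, e.g. 'dataset == "finrisk" and level == "16S"' (values
# shown are illustrative); an empty string restores the full table.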
def get_counts_by(self, category, by, indices=None):
# TODO consider switching orientation of counts and by
data = self.subset_selected(indices)
counts = | pd.crosstab(data[by], data[category]) | pandas.crosstab |
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from parameters_cov import params
import pandas as pd
import numpy as np
from math import ceil
import datetime
from data_constants import POPULATIONS
month_len = 365/12
longname = {'S': 'Susceptible',
'I': 'Infected',
'R': 'Recovered (total)',
'H': 'Hospitalised',
'C': 'Critical',
'D': 'Deaths (total)',
}
index = {'S': params.S_L_ind,
'I': params.I_L_ind,
'R': params.R_L_ind,
'H': params.H_L_ind,
'C': params.C_L_ind,
'D': params.D_L_ind,
}
colors = {'S': 'blue',
'I': 'orange',
'R': 'green',
'H': 'red',
'C': 'black',
'D': 'purple',
}
########################################################################################################################
def human_format(num,dp=0):
if num<1 and num>=0.1:
return '%.2f' % num
elif num<0.1:
return '%.3f' % num
magnitude = 0
while abs(num) >= 1000:
magnitude += 1
num /= 1000.0
if dp==0 and not num/10<1:
return '%.0f%s' % (num, ['', 'K', 'M', 'B'][magnitude])
else:
return '%.1f%s' % (num, ['', 'K', 'M', 'B'][magnitude])
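# Illustrative outputs: human_format(0.05) -> '0.050', human_format(0.5) -> '0.50',
# human_format(1234) -> '1.2K', human_format(25e6) -> '25M'.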
########################################################################################################################
def time_exceeded_function(yy,tt,ICU_grow):
ICU_capac = [params.ICU_capacity*(1 + ICU_grow*time/365 ) for time in tt]
Exceeded_vec = [ (yy[params.C_H_ind,i]+yy[params.C_L_ind,i]) > ICU_capac[i] for i in range(len(tt))]
Crit_vals = [ (yy[params.C_H_ind,i]+yy[params.C_L_ind,i]) for i in range(len(tt))]
c_low = [-2]
c_high = [-1]
ICU = False
if max(Crit_vals)>params.ICU_capacity:
if Exceeded_vec[0]: # if exceeded at t=0
c_low.append(0)
for i in range(len(Exceeded_vec)-1):
if not Exceeded_vec[i] and Exceeded_vec[i+1]: # entering
ICU = True
y1 = 100*(yy[params.C_H_ind,i]+yy[params.C_L_ind,i])
y2 = 100*(yy[params.C_H_ind,i+1]+yy[params.C_L_ind,i+1])
t1 = tt[i]
t2 = tt[i+1]
t_int = t1 + (t2- t1)* abs((100*0.5*(ICU_capac[i]+ICU_capac[i+1]) - y1)/(y2-y1))
c_low.append(t_int) # 0.5 * ( tt[i] + tt[i+1]))
if Exceeded_vec[i] and not Exceeded_vec[i+1]: # leaving
y1 = 100*(yy[params.C_H_ind,i]+yy[params.C_L_ind,i])
y2 = 100*(yy[params.C_H_ind,i+1]+yy[params.C_L_ind,i+1])
t1 = tt[i]
t2 = tt[i+1]
t_int = t1 + (t2- t1)* abs((100*0.5*(ICU_capac[i]+ICU_capac[i+1]) - y1)/(y2-y1))
c_high.append(t_int) # 0.5 * ( tt[i] + tt[i+1]))
if len(c_low)>len(c_high):
c_high.append(tt[-1]+1)
return c_low, c_high, ICU
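# Returns (c_low, c_high, ICU): both lists start with sentinel entries (-2, -1);
# later entries are the linearly interpolated times at which the combined critical
# cases (C_H + C_L) cross above / back below ICU capacity, so each later pair
# (c_low[j], c_high[j]) is one exceedance window. ICU is True if an upward
# crossing of the capacity line was detected.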
########################################################################################################################
def extract_info(yy,tt,t_index,ICU_grow):
###################################################################
# find percentage deaths/critical care
metric_val_L_3yr = yy[params.D_L_ind,t_index-1]
metric_val_H_3yr = yy[params.D_H_ind,t_index-1]
###################################################################
ICU_val_3yr = [yy[params.C_H_ind,i] + yy[params.C_L_ind,i] for i in range(t_index)]
ICU_capac = [params.ICU_capacity*(1 + ICU_grow*time/365 ) for time in tt]
ICU_val_3yr = max([ICU_val_3yr[i]/ICU_capac[i] for i in range(t_index)])
###################################################################
# find what fraction of herd immunity safe threshold reached
herd_val_3yr = [yy[params.S_H_ind,i] + yy[params.S_L_ind,i] for i in range(t_index)]
herd_lim = 1/(params.R_0)
herd_fraction_out = min((1-herd_val_3yr[-1])/(1-herd_lim),1)
###################################################################
# find time ICU capacity exceeded
time_exc = 0
# if True:
c_low, c_high, _ = time_exceeded_function(yy,tt,ICU_grow)
time_exc = [c_high[jj] - c_low[jj] for jj in range(1,len(c_high)-1)]
time_exc = sum(time_exc)
if c_high[-1]>0:
if c_high[-1]<=tt[-1]:
time_exc = time_exc + c_high[-1] - c_low[-1]
else:
time_exc = time_exc + tt[-1] - c_low[-1]
time_exc = time_exc/month_len
###################################################################
# find herd immunity time till reached
multiplier_95 = 0.95
threshold_herd_95 = (1-multiplier_95) + multiplier_95*herd_lim
time_reached = 50 # i.e never reached unless below satisfied
if herd_val_3yr[-1] < threshold_herd_95:
herd_time_vec = [tt[i] if herd_val_3yr[i] < threshold_herd_95 else 0 for i in range(len(herd_val_3yr))]
herd_time_vec = np.asarray(herd_time_vec)
time_reached = min(herd_time_vec[herd_time_vec>0])/month_len
return metric_val_L_3yr, metric_val_H_3yr, ICU_val_3yr, herd_fraction_out, time_exc, time_reached
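# Returned tuple: deaths in the low- and high-risk groups at t_index, peak critical
# occupancy as a fraction of ICU capacity, fraction of the herd-immunity threshold
# reached, months of exceeded ICU capacity, and months until 95% of the herd
# threshold is reached (50 means it is never reached in the simulated window).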
########################################################################################################################
def Bar_chart_generator(data, data2=None, data_group=None, name1=None, name2=None,
                        preset=None, text_addition=None, color=None, y_title=None,
                        yax_tick_form=None, maxi=True, yax_font_size_multiplier=None,
                        hover_form=None):  # ,title_font_size=None): #title
font_size = 10
if yax_font_size_multiplier is None:
yax_font_size = font_size
else:
yax_font_size = yax_font_size_multiplier*font_size
ledge = None
show_ledge = False
if len(data)==2:
cats = ['Strategy Choice','Do Nothing']
else:
cats = ['Strategy One','Strategy Two','Do Nothing']
order_vec = [len(data)-1,0,1]
order_vec = order_vec[:(len(data))]
data1 = [data[i] for i in order_vec]
cats = [cats[i] for i in order_vec]
if data2 is not None:
data2 = [data2[i] for i in order_vec]
if data_group is not None:
name1 = 'End of Year 1'
trace0 = go.Bar(
x = cats,
y = data1,
marker=dict(color=color),
name = name1,
hovertemplate=hover_form
)
traces = [trace0]
barmode = None
if data_group is not None:
data_group = [data_group[i] for i in order_vec]
traces.append( go.Bar(
x = cats,
y = data_group,
# marker=dict(color=color),
name = 'End of Year 3',
hovertemplate=hover_form
))
barmode='group'
show_ledge = True
if data2 is not None:
traces.append(go.Bar(
x = cats,
y = data2,
hovertemplate=hover_form,
name = name2)
)
show_ledge = True
if show_ledge:
ledge = dict(
font=dict(size=font_size),
x = 0.5,
y = 1.02,
xanchor= 'center',
yanchor= 'bottom',
)
# cross
if data_group is not None:
data_use = data_group
elif data2 is not None:
data_use = [data1[i] + data2[i] for i in range(len(data1))]
else:
data_use = data1
counter_bad = 0
counter_good = 0
if len(data_use)>1:
for i, dd in enumerate(data_use):
if maxi and dd == max(data_use):
worst_cat = cats[i]
worst_cat_y = dd
counter_bad += 1
if maxi and dd == min(data_use):
best_cat = cats[i]
best_cat_y = dd
counter_good += 1
if not maxi and dd == min(data_use):
worst_cat = cats[i]
worst_cat_y = dd
counter_bad += 1
if not maxi and dd == max(data_use):
best_cat = cats[i]
best_cat_y = dd
counter_good += 1
if counter_bad<2:
traces.append(go.Scatter(
x= [worst_cat],
y= [worst_cat_y/2],
mode='markers',
marker_symbol = 'x',
marker_size = (30/20)*font_size,
marker_line_width=1,
opacity=0.5,
marker_color = 'red',
marker_line_color = 'black',
hovertemplate='Worst Strategy',
showlegend=False,
name = worst_cat
))
if counter_good<2:
traces.append(go.Scatter(
x= [best_cat],
y= [best_cat_y/2],
opacity=0.5,
mode = 'text',
text = [r'✅'],
textfont= dict(size= (30/20)*font_size),
hovertemplate='Best Strategy',
showlegend=False,
name = best_cat
))
layout = go.Layout(
# autosize=False,
font = dict(size=font_size),
barmode = barmode,
template="simple_white", #plotly_white",
yaxis_tickformat = yax_tick_form,
height=450,
legend = ledge,
# xaxis=dict(showline=False),
yaxis = dict(
automargin = True,
# showline=False,
title = y_title,
title_font = dict(size=yax_font_size),
),
showlegend = show_ledge,
transition = {'duration': 500},
)
return {'data': traces, 'layout': layout}
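# Example (illustrative): compare one strategy against 'Do Nothing'. The returned
# {'data', 'layout'} dict is a valid plotly figure specification:
#   fig_dict = Bar_chart_generator([0.02, 0.05], y_title='Fraction of population',
#                                  hover_form='%{y:.1%}', color='purple')
#   go.Figure(fig_dict)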
########################################################################################################################
def solnIntoDataframe(sol,startdate):
time = pd.Series([startdate + datetime.timedelta(days=i) for i in sol['t']])
df = pd.DataFrame(time)
df.columns = ['t']
sol['y'] = np.asarray(sol['y'])
for name in index.keys():
y_Low = 100* | pd.Series(sol['y'][index[name],:]) | pandas.Series |
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 27 13:30:31 2020
@author: User
"""
import sys
import datetime as dt
from collections import Counter
import pprint
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
from matplotlib import cm
from matplotlib import gridspec
from mpl_toolkits.mplot3d import Axes3D # noqa: F401 unused import
# import os
from platform import system
import glob
import cycler
import numpy as np
import pandas as pd
from pandas.api.types import CategoricalDtype
from bs4 import BeautifulSoup
import re
from scipy.stats import linregress
# from sklearn import linear_model
import scipy.signal
import itertools
from itertools import chain, repeat
import logging
from pathlib import Path
# import h5py
from multiprocessing import Pool, cpu_count
# import timeit
# import time
matplotlib.rcParams.update({"font.size": 16})
plt.rcParams["font.family"] = "sans-serif"
plt.rcParams["font.sans-serif"] = "Helvetica"
plt.rcParams["axes.edgecolor"] = "#333F4B"
plt.rcParams["xtick.color"] = "#333F4B"
plt.rcParams["ytick.color"] = "#333F4B"
try:
import statsmodels.formula.api as smf
import statsmodels.api as sm
import seaborn as sns
except Exception as e:
print("No modules: %s" % e)
from file_py_helper.find_folders import FindExpFolder
from file_py_helper.file_functions import FileOperations
from file_py_helper.PostChar import (
SampleSelection,
Characterization_TypeSetting,
SampleCodesChar,
)
if __name__ == "__main__":
print(f"Package: {__package__}, File: {__file__}")
from elchempy.main_run_PAR_DW import ECRunOVV
from elchempy.indexer.prepare_input import CleanUpCrew
from elchempy.experiments.EIS.models import Model_Collection
import post_helper
import merger
# import EC
# sys.path.append(list(FH_path.rglob('*.py')))
# import FH_path.joinpath('FindExpFolder.py')
# import FindExpFolder.py
# from FileHelper import FindExpFolder
# from FindExpFolder import *
# from .experiments import EIS
# from .runEC import run_PAR_DW
from elchempy.runEC.EC_logging_config import start_logging
# logger = start_logging(__name__)
else:
# print('\n\n***** run_PAR_DW *****')
print(f"File: {__file__}, Name:{__name__}, Package:{__package__}")
# FH_path = Path(__file__).parent.parent.parent
# sys.path.append(str(FH_path))
# import FileHelper
from elchempy.main_run_PAR_DW import ECRunOVV
from elchempy.indexer.prepare_input import CleanUpCrew
from elchempy.runEC.EC_logging_config import start_logging
from elchempy.PostEC import post_helper, merger
from elchempy.experiments.EIS.models import Model_Collection
# logger = start_logging(__name__)
_logger = logging.getLogger(__name__)
_logger.setLevel(20)
EvRHE = "E_AppV_RHE"
class PostEC:
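    """Column definitions (AllColls/DropColls/KeepColls) and static helpers for
    post-processing EC data: PAR-filename status checks, a parallel groupby
    helper and the legacy folder-organizing routine."""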
AllColls = [
"Unnamed: 0",
"Segment #",
"Point #",
"E(V)",
"I(A)",
"Elapsed Time(s)",
"Current Range",
"Status",
"E Applied(V)",
"Frequency(Hz)",
"Z Real",
"Z Imag",
"ActionId",
"AC Amplitude",
"RHE_OCP",
"E_AppV_RHE",
"E_Applied_VRHE",
"j A/cm2",
"jmAcm-2",
"jcorr",
"Gas",
"EXP",
"Electrode",
"j_ring",
"RPM",
"Comment",
"Measured_OCP",
"pH",
"Electrolyte",
"ScanRate_calc",
"SampleID",
"File",
"BaseName",
"hash",
"Instrument",
"DATE",
"EvRHE_diff",
"DestFile",
"Sweep_Type",
"Type",
"Cycle",
"DAC_V",
"Scanrate",
"ORR_scan",
"Jcorr",
"J_N2_scan",
"J_O2_diff",
"J_O2_diff_diff",
"Analysis_date",
"J_2nd_diff",
"Jkin_max",
"Jkin_min",
"E_onset",
"Diff_lim",
"E_half",
"I(A)_ring",
"I(A)_disk",
"Frac_H2O2",
"J_ring",
"n_ORR",
]
DropColls = [
"Unnamed: 0",
"Segment #",
"Point #",
"E(V)",
"I(A)",
"Elapsed Time(s)",
"Current Range",
"Status",
"E Applied(V)",
"Frequency(Hz)",
"Z Real",
"Z Imag",
"ActionId",
"AC Amplitude",
"RHE_OCP",
"E_AppV_RHE",
"jmAcm-2",
"jcorr",
"Gas",
"EXP",
"Electrode",
"j_ring",
"RPM",
"Comment",
"Measured_OCP",
"pH",
"Electrolyte",
"ScanRate_calc",
"SampleID",
"File",
"BaseName",
"hash",
"Instrument",
"DATE",
"EvRHE_diff",
"DestFile",
"Sweep_Type",
"Type",
"Cycle",
"DAC_V",
"Scanrate",
"ORR_scan",
"Jcorr",
"J_N2_scan",
"J_O2_diff",
"J_O2_diff_diff",
"Analysis_date",
"J_2nd_diff",
"Jkin_max",
"Jkin_min",
"E_onset",
"Diff_lim",
"E_half",
"I(A)_ring",
"I(A)_disk",
"Frac_H2O2",
"J_ring",
"n_ORR",
]
KeepColls = [
"E_AppV_RHE",
"jmAcm-2",
"Jcorr",
"J_N2_scan",
"Jkin_max",
"Jkin_min",
"Frac_H2O2",
"J_ring",
"n_ORR",
]
# SampleCodes = FindExpFolder.LoadSampleCode()
# FindExpFolder('VERSASTAT').SampleCodeLst
# PostDestDir.mkdir(parents=True,exist_ok=True)
# ExpPARovv = EC_loadOVV()
# OnlyRecentMissingOVV = runEC.MainPrepareList()
# ExpPARovv = ExpPARovv.iloc[100:120]
OutParsID = pd.DataFrame()
# Go1, Go2, Go3 = True, False, False
# Go1, Go2, Go3 = False, True, False
Go1, Go2, Go3 = False, False, True
# KL_coeff = KL_coefficients()
EvRHE_List = [
0,
0.1,
0.2,
0.3,
0.4,
0.45,
0.5,
0.55,
0.6,
0.65,
0.7,
0.75,
0.8,
0.9,
1,
]
def __init__(self):
self.DestDir = FindExpFolder("VERSASTAT").PostDir
@staticmethod
def StartLogging(level_log="INFO"):
# level_log = kwargs['level']
log_fn = FindExpFolder("VERSASTAT").PostDir.joinpath("PostEC_logger.log")
logging.basicConfig(
filename=log_fn,
filemode="w",
level=level_log,
format="%(asctime)s %(levelname)s, %(lineno)d: %(message)s",
)
logging.warning("Started logging for PostEC script...")
def applyParallel(dfGrouped, func):
with Pool(cpu_count() - 1) as p:
ret_list = p.map(func, [group for name, group in dfGrouped])
return ret_list
def check_status(file, verbose=False):
"""Check status will return (status,extra) of filename"""
PAR_file_test = Path(str(file)).stem
match = [
re.search("(?<!VERS|Vers)(AST|postAST|pAST)", str(a))
for a in PAR_file_test.split("_")
]
if any(match):
status = "EoL"
extra = [
a
for a in PAR_file_test.split("_")
if [i for i in match if i][0][0] in a
]
if verbose:
print(file, status, *extra)
return status, extra[0]
# if any([re.search(r'', i) for i in str(Path(str(file)).stem.split('_'))]):
else:
return "BoL", 0
# status =
# extra = [0]
# return status,extra
def postEC_Status(files, verbose=False):
# files = ['N2_HER_1500rpm_JOS6_pAST-sHA_285_#3_Disc_Parstat']
if len(files) > 1:
status_lst, extra_lst = [], []
for file in files:
status, extra = PostEC.check_status(file)
status_lst.append(status)
extra_lst.append(extra)
return status_lst, extra_lst
else:
return PostEC.check_status(files)
def OLD_PostOrganizeFolders(TakeRecentList=True):
postOVV = []
PostDestDir = FindExpFolder("VERSASTAT").DestDir.joinpath("PostEC")
PAR_version = FileOperations.version
RunOVV_fn_opts = list(
FindExpFolder("VERSASTAT").DestDir.rglob(
"RunOVV_v{0}.xlsx".format(PAR_version)
)
)
RunOVV_fn = [i for i in RunOVV_fn_opts if not "_Conflict" in i.stem][0]
if RunOVV_fn.is_file() and TakeRecentList == True:
OvvFromFile = pd.read_excel(RunOVV_fn, index_col=[0])
status, extra = PostEC.postEC_Status(OvvFromFile.PAR_file.values)
OvvFromFile = OvvFromFile.assign(
**{
"Date_PAR_EXP": OvvFromFile.PAR_date - OvvFromFile.EXP_date,
"Status": status,
"Extra": extra,
}
)
OnlyRecentMissingOVV = OvvFromFile
# OvvFromFile['Date_PAR_EXP'] = OvvFromFile.PAR_date-OvvFromFile.EXP_date
# OvvFromFile['Status'] = OvvFromFile.PAR_file.values
print("EC OVV loaded from file:{0}".format(RunOVV_fn))
OnlyRecentMissingOVV = FileOperations.ChangeRoot_DF(
OnlyRecentMissingOVV, ["Dest_dir", "EXP_dir", "PAR_file"]
)
# CS_parts_PDD = FileOperations.find_CS_parts(PostDestDir)
# CS_parts_pOVV = FileOperations.find_CS_parts(OnlyRecentMissingOVV.Dest_dir.iloc[0])
# chLst =[]
# if CS_parts_PDD[0] != CS_parts_pOVV[0]:
# chLst = [CS_parts_PDD[0].joinpath(FileOperations.find_CS_parts(i)[1]) for i in OnlyRecentMissingOVV.Dest_dir.values]
# OnlyRecentMissingOVV['Dest_dir'] = chLst
# else:
# pass
postOVVlst, outLst = [], []
postOVVcols = [
"DestFilename",
"SampleID",
"Status",
"Status_extra",
"Electrolyte",
"Gas",
"RPM",
"Scanrate",
"EXP_date",
"Type_Exp",
"SourceFilename",
"Exp_dir",
]
# postOVVout = PostEC.FromListgrp(group)
# postOVVlst = PostEC.applyParallel(OnlyRecentMissingOVV.groupby('Dest_dir'),PostEC.FromListgrp)
# postOVVlst = [outLst.append(PostEC.FromListgrp(i)) for i in OnlyRecentMissingOVV.groupby('Dest_dir')]
# for i in OnlyRecentMissingOVV.groupby('Dest_dir'):
# PostEC.FromListgrp(i)
# try:
# postOVVout = pd.DataFrame(postOVVlst,columns=)
# except Exception as e:
# postOVVout = pd.DataFrame(postOVVlst)
# for n,gr in OnlyRecentMissingOVV.groupby(by=['Dest_dir']):
# PostEC.FromListgrp(n,gr.EXP_dir.unique()[0])
# pass
# postOVVlst = [outLst.append(PostEC.FromListgrp(n,gr.EXP_dir.unique()[0])) for n,gr in OnlyRecentMissingOVV.groupby(by=['Dest_dir'])]
postOVVout = pd.concat(
[pd.DataFrame(i, columns=postOVVcols) for i in outLst],
sort=False,
ignore_index=True,
)
postOVVout.to_excel(PostDestDir.joinpath("postEC_Organized.xlsx"))
return postOVVout
class EnterExitLog:
def __init__(self, funcName):
self.funcName = funcName
def __enter__(self):
_logger.info(f"Started: {self.funcName}")
self.init_time = dt.datetime.now()
return self
def __exit__(self, type, value, tb):
self.end_time = dt.datetime.now()
self.duration = self.end_time - self.init_time
_logger.info(f"Finished: {self.funcName} in {self.duration} seconds")
def func_timer_decorator(func):
def func_wrapper(*args, **kwargs):
with EnterExitLog(func.__name__):
return func(*args, **kwargs)
return func_wrapper
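# Illustrative use of the timing decorator (the function name is arbitrary):
#
#   @func_timer_decorator
#   def load_everything():
#       ...
#
# Entry and exit of the wrapped call are logged together with its duration.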
def get_daily_pickle(exp_type=""):
today = dt.datetime.now().date()
_result = {"today": today}
if exp_type:
daily_pickle_path = FindExpFolder("VERSASTAT").PostDir.joinpath(
f"{today:%Y-%m-%d}_{exp_type}_{system()}.pkl.compress"
)
daily_pkl_options = list(
FindExpFolder("VERSASTAT").PostDir.rglob(
f"*_{exp_type}_{system()}.pkl.compress"
)
)
daily_pkl_options = sorted(daily_pkl_options, key=lambda x: x.stat().st_ctime)
_result.update(
{
"daily_path": daily_pickle_path,
"_exists": daily_pickle_path.exists(),
"daily_options": daily_pkl_options,
}
)
daily_pickle_path_RAW = FindExpFolder("VERSASTAT").PostDir.joinpath(
f"{today:%Y-%m-%d}_{exp_type}_{system()}_RAW.pkl.compress"
)
daily_pkl_options_RAW = list(
FindExpFolder("VERSASTAT").PostDir.rglob(
f"*_{exp_type}_{system()}_RAW.pkl.compress"
)
)
daily_pkl_options_RAW = sorted(
daily_pkl_options_RAW, key=lambda x: x.stat().st_ctime
)
_result.update(
{
"daily_path_RAW": daily_pickle_path_RAW,
"_raw_exists": daily_pickle_path_RAW.exists(),
"daily_options_RAW": daily_pkl_options_RAW,
}
)
if "EIS" in exp_type:
_result.update(
{
"daily_path_BRUTE": FindExpFolder("VERSASTAT").PostDir.joinpath(
f"{today:%Y-%m-%d}_{exp_type}_BRUTE_{system()}.pkl.compress"
),
"daily_path_RAW_WB": FindExpFolder("VERSASTAT").PostDir.joinpath(
f"{today:%Y-%m-%d}_{exp_type}_RAW_WB_{system()}.pkl.compress"
),
}
)
return _result
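# Sketch of the dict returned for e.g. exp_type="EIS_pars" (actual paths depend
# on FindExpFolder, the date and the platform):
#   {'today': date, 'daily_path': Path, '_exists': bool, 'daily_options': [...],
#    'daily_path_RAW': Path, '_raw_exists': bool, 'daily_options_RAW': [...],
#    'daily_path_BRUTE': Path, 'daily_path_RAW_WB': Path}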
def _collect_test():
tt = CollectLoadPars(load_type="fast")
class CollectLoadPars:
def __init__(self, load_type="fast"):
self.load_type = load_type
self.load_pars()
self.collect_dict()
def load_pars(self):
_BaseLoad = BaseLoadPars()
_kws = {"EC_index": _BaseLoad.EC_index, "SampleCodes": _BaseLoad.SampleCodes}
if "fast" in self.load_type:
_kws.update(**{"reload": False, "reload_raw": False})
self.EIS_load = EIS_LoadPars(**_kws)
self.ORR_load = ORR_LoadPars(**_kws)
self.N2_load = N2_LoadPars(**_kws)
def collect_dict(self):
_load_attrs = [i for i in self.__dict__.keys() if i.endswith("_load")]
_collect = {}
for _load_pars in _load_attrs:
_pars_name = f'{_load_pars.split("_")[0]}_pars'
if hasattr(getattr(self, _load_pars), _pars_name):
_pars = getattr(getattr(self, _load_pars), _pars_name)
_collect.update({_pars_name: _pars})
self.pars_collection = _collect
class BaseLoadPars:
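    """Shared machinery for the *_LoadPars loaders: EC index / SampleCodes
    handling, daily pickle bookkeeping and the (re)load delegation. Subclasses
    are expected to provide the functions listed in `_required_funcs`."""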
_required_funcs = [
"make_raw_pars_from_scratch",
"edit_raw_columns",
"search_pars_files",
"read_in_pars_files",
"extra_stuff_delegator",
]
def __init__(
self,
EC_index=pd.DataFrame(),
SampleCodes=pd.DataFrame(),
exp_type="",
reload=False,
reload_raw=False,
):
self.exp_type = exp_type
self._auto_set_exp_type()
self.EC_index = EC_index
self.SampleCodes = SampleCodes
self._check_class_req_functions()
self.check_EC_index()
self.set_OVV_exp_type()
self._reload = reload
self._reload_raw = reload_raw
self.get_daily_pickle()
if self.exp_type:
self.load_delegator()
def _auto_set_exp_type(self):
_cls_name = self.__class__.__name__
if "_" in _cls_name:
_cls_exp_type = _cls_name.split("_")[0]
_exp_type = f"{_cls_exp_type}_pars"
self.exp_type = _exp_type
def check_EC_index(self):
if self.EC_index.empty:
EC_index = ECRunOVV(load=1).EC_index
EC_index = FileOperations.ChangeRoot_DF(EC_index, [])
EC_index.PAR_file = EC_index.PAR_file.astype(str)
EC_index["Loading_cm2"] = EC_index["Loading_cm2"].round(3)
self.EC_index = EC_index
if self.SampleCodes.empty:
SampleCodes = FindExpFolder().LoadSampleCode()
self.SampleCodes = SampleCodes
# SampleCodesChar().load
def set_OVV_exp_type(self):
if not self.EC_index.empty and self.exp_type:
PAR_exp_uniq = self.EC_index.PAR_exp.unique()
PAR_match = [
parexp
for parexp in PAR_exp_uniq
if self.exp_type.split("_")[0] in parexp
]
self.exp_type_match = PAR_match
# if PAR_match:
EC_index_exp = self.EC_index.loc[self.EC_index.PAR_exp.isin(PAR_match)]
self.EC_index_exp = EC_index_exp
if EC_index_exp.empty:
_logger.error(f'set_OVV_exp_type "{self.__class__.__name__}" empty')
self.EC_index_exp_destdirs = EC_index_exp.Dest_dir.unique()
def get_daily_pickle(self):
exp_type = self.exp_type
today = dt.datetime.now().date()
_result = {"today": today}
if exp_type:
daily_pickle_path = FindExpFolder("VERSASTAT").PostDir.joinpath(
f"{today:%Y-%m-%d}_{exp_type}_{system()}.pkl.compress"
)
daily_pkl_options = list(
FindExpFolder("VERSASTAT").PostDir.rglob(
f"*_{exp_type}_{system()}.pkl.compress"
)
)
daily_pkl_options = sorted(
daily_pkl_options, key=lambda x: x.stat().st_ctime
)
_result.update(
{
"daily_path": daily_pickle_path,
"_exists": daily_pickle_path.exists(),
"daily_options": daily_pkl_options,
}
)
if not daily_pkl_options and not self._reload_raw:
self._reload_raw = True
daily_pickle_path_RAW = FindExpFolder("VERSASTAT").PostDir.joinpath(
f"{today:%Y-%m-%d}_{exp_type}_{system()}_RAW.pkl.compress"
)
_pickle_path_RAW_read_in = FindExpFolder("VERSASTAT").PostDir.joinpath(
f"{exp_type}_{system()}_RAW_read_in.pkl.compress"
)
daily_pkl_options_RAW = list(
FindExpFolder("VERSASTAT").PostDir.rglob(
f"*_{exp_type}_{system()}_RAW.pkl.compress"
)
)
daily_pkl_options_RAW = sorted(
daily_pkl_options_RAW, key=lambda x: x.stat().st_ctime
)
_result.update(
{
"daily_path_RAW": daily_pickle_path_RAW,
"_raw_exists": daily_pickle_path_RAW.exists(),
"daily_options_RAW": daily_pkl_options_RAW,
"pkl_path_RAW_read_in": _pickle_path_RAW_read_in,
}
)
if "EIS" in exp_type:
daily_pkl_options_RAW_WB = list(
FindExpFolder("VERSASTAT").PostDir.rglob(
f"*_{exp_type}_{system()}_RAW_WB.pkl.compress"
)
)
daily_pkl_options_RAW_WB = sorted(
daily_pkl_options_RAW_WB, key=lambda x: x.stat().st_ctime
)
_result.update(
{
"daily_path_BRUTE": FindExpFolder("VERSASTAT").PostDir.joinpath(
f"{today:%Y-%m-%d}_{exp_type}_{system()}_BRUTE.pkl.compress"
),
"daily_path_RAW_WB": FindExpFolder(
"VERSASTAT"
).PostDir.joinpath(
f"{today:%Y-%m-%d}_{exp_type}_{system()}_RAW_WB.pkl.compress"
),
"daily_options_RAW_WB": daily_pkl_options_RAW_WB,
}
)
self.daily_pickle_path = _result
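    # Load flow of load_delegator (sketch): reload & reload_raw -> rebuild the
    # raw pars from the PAR files on disk; reload only -> read the newest *_RAW
    # daily pickle and re-run edit_raw_columns; neither -> read the newest
    # processed daily pickle.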
def load_delegator(self):
setattr(self, self.exp_type, pd.DataFrame())
if self._reload:
if self._reload_raw:
self.make_raw_pars_from_scratch()
else:
self.read_in_daily_raw()
if hasattr(self, "edit_raw_columns"):
try:
self.edit_raw_columns()
except Exception as e:
_logger.warning(
f'edit_raw_columns in load_delegator "{self.__class__.__name__}" {self.exp_type} failed because {e}'
)
self.save_daily_pars()
else:
self.read_in_daily_pars()
try:
self.extra_stuff_delegator()
except Exception as e:
_logger.warning(
f'extra_stuff_delegator "{self.__class__.__name__}" {self.exp_type} failed because {e}'
)
def _check_class_req_functions(self):
for _f in self._required_funcs:
if not hasattr(self, _f) and "BaseLoadPars" not in self.__class__.__name__:
_logger.warning(
f'Class "{self.__class__.__name__}" is missing required func: "{_f}"'
)
def save_daily_pars(self):
pars = getattr(self, self.exp_type)
pars.to_pickle(self.daily_pickle_path["daily_path"])
_logger.info(
f'{self.exp_type} len({len(pars)}) OVV to daily pickle: {self.daily_pickle_path.get("daily_path")}'
)
def read_in_daily_pars(self):
if self.daily_pickle_path.get("daily_options"):
_pars_fp = self.daily_pickle_path.get("daily_options")[-1]
_logger.info(
f"start read_in_daily_pars {self.exp_type} pars OVV from daily {_pars_fp} "
)
_pars = pd.read_pickle(_pars_fp)
try:
_pars = FileOperations.ChangeRoot_DF(_pars, [], coltype="string")
setattr(self, self.exp_type, _pars)
_logger.info(f"Loaded {self.exp_type} pars OVV from daily {_pars_fp} ")
except Exception as e:
_pars = pd.DataFrame()
_logger.error(
f" ERROR in Loaded {self.exp_type} pars OVV from daily {_pars_fp} {e} "
)
else:
_pars = pd.DataFrame()
_pars_fp = "options empty list"
if _pars.empty:
_logger.error(
f" ERROR in Loaded {self.exp_type} pars OVV from daily {_pars_fp}: empty "
)
def reload_raw_df_delegator(self):
_raw_read_fp = self.daily_pickle_path.get("pkl_path_RAW_read_in")
if _raw_read_fp.exists() and not (self._reload or self._reload_raw):
_pars_RAW_read_in = pd.read_pickle(_raw_read_fp)
setattr(self, f"{self.exp_type}_RAW", _pars_RAW_read_in)
else:
self.generate_raw_df()
self.reload_raw_df()
_pars_RAW_read_in = getattr(self, f"{self.exp_type}_RAW")
_pars_RAW_read_in.to_pickle(_raw_read_fp)
def read_in_daily_raw(self):
_raw_fp = self.daily_pickle_path.get("daily_options_RAW")[-1]
_pars_RAW = pd.read_pickle(_raw_fp)
_pars_RAW.sort_values("source_delta_mtime", inplace=True)
if not "level_0" in _pars_RAW.columns:
_pars_RAW = _pars_RAW.reset_index()
setattr(self, f"{self.exp_type}_RAW", _pars_RAW)
_logger.info(f"Loaded raw df {self.exp_type} from daily {_raw_fp} ")
def save_daily_raw(self):
_pars_RAW = getattr(self, f"{self.exp_type}_RAW")
_pars_RAW.to_pickle(self.daily_pickle_path.get("daily_path_RAW"))
_logger.info(
f'{self.exp_type} OVV to daily pickle: {self.daily_pickle_path.get("daily_path_RAW")}'
)
def set_gen_raw_fls(self):
_par_files = [
list(self.search_pars_files(d)) for d in self.EC_index_exp_destdirs
]
self._par_files = _par_files
if not _par_files:
_logger.warning(f"{self.exp_type} set_gen_raw_fls: list empty ")
self._par_fls_gen = (a for i in self._par_files for a in i)
@func_timer_decorator
def generate_raw_df(self):
if not hasattr(self, "_par_fls_gen"):
self.set_gen_raw_fls()
_pars_lst = list(self.read_in_pars_files(self._par_fls_gen))
try:
_pars_RAW = pd.concat(_pars_lst, sort=False)
except Exception as e:
_pars_RAW = pd.DataFrame()
_logger.warning(f"{self.exp_type} generate_raw_df: {e}")
setattr(self, f"{self.exp_type}_RAW", _pars_RAW)
@staticmethod
def get_source_meta(filepath):
i = filepath
_source_mtime = dt.datetime.fromtimestamp(i.stat().st_mtime)
_delta_mtime = dt.datetime.now() - _source_mtime
_meta_res = {
"sourceFilename": i,
"source_mtime": _source_mtime,
"source_delta_mtime": _delta_mtime,
"sourcebasename": i.stem,
}
return _meta_res
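        # e.g. {'sourceFilename': Path(...), 'source_mtime': datetime,
        #       'source_delta_mtime': timedelta, 'sourcebasename': str}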
def extra_stuff_delegator(self):
        # use dir() so the _extra_* methods are found (instance __dict__ only holds data attributes)
        _extra_funcs = [i for i in dir(self) if i.startswith("_extra")]
for _func in _extra_funcs:
try:
func = getattr(self, _func)
func()
# self._extra_plotting()
except Exception as e:
_logger.info(
f"{self.__class__.__name__} Extra stuff failed because {e}"
)
def _testing():
tt = EIS_LoadPars(reload=False, reload_raw=False)
tt._reload_raw
self = tt
self.load_delegator()
self.make_raw_pars_from_scratch()
class EIS_LoadPars(BaseLoadPars):
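    """Loader for EIS fit parameters: reads the per-experiment
    *_pars_v{EIS_version}.xlsx files plus the lin_Warburg pickles, merges the
    two and keeps only the final fits (FINAL_FIT == 1)."""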
col_names = ["File_SpecFit", "File_SpecRaw", "PAR_file"]
def __init__(
self,
EC_index=pd.DataFrame(),
SampleCodes=pd.DataFrame(),
exp_type="EIS_pars",
BRUTE_out=False,
**kws,
):
self.BRUTE_out = BRUTE_out
super().__init__(
EC_index=EC_index, SampleCodes=SampleCodes, exp_type=exp_type, **kws
)
def read_in_pars_files(self, _genlist):
# _ps = Path(d).rglob(f'*_pars_v{FileOperations.version}.xlsx' )
while True:
try:
i = next(_genlist)
if i.name.endswith("xlsx"):
_pp = pd.read_excel(i, index_col=[0])
elif i.name.endswith("pkl"):
_pp = pd.read_pickle(i)
_pp = FileOperations.ChangeRoot_DF(_pp, [], coltype="string")
_meta = self.get_source_meta(i)
_pp = _pp.assign(**_meta)
yield _pp
except StopIteration:
return "all done"
print("gen empty")
def search_pars_files(self, _dest_dir):
return Path(_dest_dir.joinpath("EIS")).rglob(
f"*_pars_v{FileOperations.EIS_version}.xlsx"
)
@func_timer_decorator
def make_raw_pars_from_scratch(self):
_logger.info(
f'Reloading raw extra steps "{self.__class__.__name__}" {self.exp_type}'
)
self.reload_raw_df_delegator()
self._load_WB_delegator()
self._merge_WB_pars_raw()
self._raw_finish_edit_columns()
self.save_daily_raw()
def reload_raw_df_delegator(self):
_raw_read_fp = self.daily_pickle_path.get("pkl_path_RAW_read_in")
if _raw_read_fp.exists() and not (self._reload or self._reload_raw):
EIS_pars_RAW_read_in = pd.read_pickle(_raw_read_fp)
setattr(self, f"{self.exp_type}_RAW", EIS_pars_RAW_read_in)
else:
self.generate_raw_df()
self.reload_raw_df()
EIS_pars_RAW_read_in = getattr(self, f"{self.exp_type}_RAW")
EIS_pars_RAW_read_in.to_pickle(_raw_read_fp)
def reload_raw_df(self):
_pars_RAW = getattr(self, f"{self.exp_type}_RAW")
_pars_RAW.sort_values("source_delta_mtime", inplace=True)
_pars_RAW = _pars_RAW.reset_index()
setattr(self, f"{self.exp_type}_RAW", _pars_RAW)
self._raw_extra_steps()
_logger.info(f'Reloading "{self.__class__.__name__}" {self.exp_type}')
# self.EIS_pars_RAW = EIS_pars_RAW
def _raw_extra_steps(self):
_logger.info(
f'Reloading raw extra steps "{self.__class__.__name__}" {self.exp_type}'
)
EIS_pars_all = getattr(self, f"{self.exp_type}_RAW")
float_cols = set(
[
a
for i in EIS_pars_all.lmfit_var_names.unique()
if type(i) == str and not "(" in i
for a in i.split(", ")
]
)
float_cols.update(
set(
[a for i in float_cols for a in EIS_pars_all.columns if a.startswith(i)]
)
)
EIS_pars_all[list(float_cols)] = EIS_pars_all[list(float_cols)].fillna(0)
# EIS_pars_all[list(float_cols)] = EIS_pars_all[list(float_cols)].astype(float)
obj_flt_cols = [
i
for i in EIS_pars_all.columns
if str(EIS_pars_all[i].dtype) == "object" and i in float_cols
]
EIS_pars_all[obj_flt_cols] = EIS_pars_all[obj_flt_cols].replace("", 0)
EIS_pars_all[list(float_cols)] = EIS_pars_all[list(float_cols)].astype(float)
wrong_fls = [
EIS_pars_all.loc[EIS_pars_all[i].astype(str).str.contains("Parameter")]
for i in obj_flt_cols
]
if wrong_fls:
wrong_objflt_df = pd.concat(wrong_fls)
fix_dct = {
i: [
float(v.split("value=")[-1].split(",")[0])
for v in wrong_objflt_df[i].values
]
for i in obj_flt_cols
}
fixed_objflt_df = wrong_objflt_df.assign(**fix_dct)
EIS_pars_all = pd.concat(
[
EIS_pars_all.drop(index=wrong_objflt_df.index, axis=0),
fixed_objflt_df,
],
axis=0,
sort=True,
)
setattr(self, f"{self.exp_type}_RAW", EIS_pars_all)
def _load_WB_delegator(self):
daily_options_WB = self.daily_pickle_path.get("daily_options_RAW_WB")
if daily_options_WB:
_WB_RAW_daily_path = daily_options_WB[-1]
if _WB_RAW_daily_path.exists() and not (self._reload or self._reload_raw):
_EIS_WB_pars_all = pd.read_pickle(_WB_RAW_daily_path)
setattr(self, f"{self.exp_type}_WB", _EIS_WB_pars_all)
else:
self.reload_raw_WB_df()
else:
self.reload_raw_WB_df()
def reload_raw_WB_df(self):
_logger.info(f'Reloading "{self.__class__.__name__}" {self.exp_type} WB')
_EIS_WB_files = [
list(Path(d.joinpath("EIS/lin_Warburg")).rglob(f"lin_Warburg*.pkl"))
for d in self.EC_index_exp_destdirs
]
self._EIS_WB_files = _EIS_WB_files
self._EIS_WB_fls = (a for i in _EIS_WB_files for a in i)
_WB_lst = list(self.read_in_pars_files(self._EIS_WB_fls))
_EIS_WB_pars_all = pd.concat(_WB_lst, sort=False, ignore_index=True)
setattr(self, f"{self.exp_type}_WB", _EIS_WB_pars_all)
_EIS_WB_pars_all.to_pickle(self.daily_pickle_path.get("daily_path_RAW_WB"))
def _merge_WB_pars_raw(self):
_EIS_WB_pars_all = getattr(self, f"{self.exp_type}_WB")
EIS_pars_all = getattr(self, f"{self.exp_type}_RAW")
_diffcols = set(EIS_pars_all.columns).difference(_EIS_WB_pars_all.columns)
_mcols = [
i
for i in set(EIS_pars_all.columns).intersection(_EIS_WB_pars_all.columns)
if i
not in [
"sourceFilename",
"source_mtime",
"source_delta_mtime",
"sourcebasename",
]
]
_dtype_mismatch = [
(i, EIS_pars_all[i].dtype, _EIS_WB_pars_all[i].dtype)
for i in _mcols
if EIS_pars_all[i].dtype != _EIS_WB_pars_all[i].dtype
]
if _dtype_mismatch:
_excl = []
for i in _dtype_mismatch:
try:
_EIS_WB_pars_all[i[0]] = _EIS_WB_pars_all[i[0]].astype(i[1])
except Exception as e:
_excl.append(i[0])
print(i, "\n", e)
_mcols = [i for i in _mcols if i not in _excl]
# EIS_pars_all[i[0]] = EIS_pars_all[i[0]].astype(i[2])
_merge = pd.merge(
EIS_pars_all, _EIS_WB_pars_all, on=_mcols, how="left", suffixes=("", "_WB")
)
        if not _merge.empty:
            EIS_pars_all = _merge
        else:
            print("WB merge was empty")
        setattr(self, f"{self.exp_type}_RAW", EIS_pars_all)
def _raw_finish_edit_columns(self):
# EIS_pars_all = self._merge_WB_pars_raw(EIS_pars_all)
EIS_pars_all = getattr(self, f"{self.exp_type}_RAW")
EIS_pars_all = EIS_pars_all.assign(
**{
"EIS_fake": [
"fakeZmean" in Path(i).name
for i in EIS_pars_all.PAR_file.to_numpy()
]
}
)
_not_in_index = EIS_pars_all.loc[
(
~(EIS_pars_all.PAR_file.isin(self.EC_index.PAR_file.values))
                & (EIS_pars_all.EIS_fake == False)
)
]
CleanUpCrew(list_of_files=_not_in_index.sourceFilename.unique(), delete=True)
EIS_pars_all = EIS_pars_all.iloc[
~(EIS_pars_all.index.isin(_not_in_index.index))
]
EIS_pars_all = Load_from_Indexes.test_update_from_index(
EIS_pars_all, self.EC_index
)
setattr(self, f"{self.exp_type}_RAW", EIS_pars_all)
def edit_raw_columns(self):
EIS_pars_all = getattr(self, f"{self.exp_type}_RAW")
# EIS_pars_RAW = self._raw_extra_steps(EIS_pars_RAW)
E_dc_RHE_cols = [
(np.round(i, 3), np.round(i, 3) * 1e3) for i in EIS_pars_all[EvRHE].values
]
EIS_pars_all = EIS_pars_all.assign(
**{
"E_dc_RHE": [i[0] for i in E_dc_RHE_cols],
"E_dc_RHE_mV": [i[1] for i in E_dc_RHE_cols],
}
)
EIS_pars_recent = EIS_pars_all.loc[
(EIS_pars_all.source_mtime > pd.Timestamp(dt.date(2020, 11, 25)))
& (EIS_pars_all.PAR_file.str.contains("None") == False)
]
EIS_pars_undup = EIS_pars_recent.dropna(subset=self.col_names).drop_duplicates(
keep="first"
)
# === POST EDITING OF LOADED PARS ===
EIS_pars_undup = EIS_pars_undup.assign(
**{"Loading_cm2": EIS_pars_undup["Loading_cm2"].round(3)}
)
EIS_pars_undup = post_helper.make_uniform_EvRHE(EIS_pars_undup)
EIS_pars_undup = CollectPostOVV.MatchECconditions(EIS_pars_undup)
# EIS_pars_undup = Load_from_Indexes.add_missing_ECindex_cols(EC_index, EIS_pars_undup)
_oc_OVV = list(EIS_pars_undup.columns.intersection(self.EC_index_exp.columns))
if not set(self.EC_index_exp.groupby(_oc_OVV).groups.keys()).intersection(
EIS_pars_undup.groupby(_oc_OVV).groups.keys()
):
_drpcols = [
a
for a in EIS_pars_undup.columns
if (
a in [i for i in _oc_OVV if i not in "PAR_file"]
or "_".join(a.split("_")[0:-1])
in [i for i in _oc_OVV if i not in "PAR_file"]
)
]
# EIS_pars_undup.drop(columns =_drpcols)
EIS_pars_undup = Load_from_Indexes.add_missing_ECindex_cols(
self.EC_index, EIS_pars_undup.drop(columns=_drpcols)
)
# EIS_pars_undup = pd.merge(EIS_pars_undup,EIS_OVV,on=_oc_OVV, how='left')
_oc_SC = list(EIS_pars_undup.columns.intersection(self.SampleCodes.columns))
EIS_pars_undup = pd.merge(
EIS_pars_undup, self.SampleCodes, how="left", on=_oc_SC
)
EIS_pars_BRUTE = EIS_pars_undup.loc[
(EIS_pars_undup.BRUTE_FIT == 1) | (EIS_pars_undup.FINAL_FIT == 0)
]
if self.BRUTE_out:
            EIS_pars_BRUTE.to_pickle(self.daily_pickle_path["daily_path_BRUTE"])
EIS_pars = EIS_pars_undup.loc[(EIS_pars_undup.FINAL_FIT == 1)]
EIS_pars = EIS_extra_methods.add_best_model_per_spectrum(EIS_pars)
setattr(self, self.exp_type, EIS_pars)
# def extra_stuff_delegator(self):
# try:
# self._extra_best_models()
# self._extra_plotting()
# except Exception as e:
# _logger.info(f'{self.__class__.__name__} Extra stuff failed because {e}')
def _extra_best_models(self):
_err_type = "lmfit_MSE"
_filter = "(EIS_pars.lmfit_MSE < 65E4) & (EIS_pars.Rct < 2E3) & (EIS_pars.Rct > 2E-2) \
& (EIS_pars.Rs > 0.01) & (EIS_pars.Rs < 200) & (EIS_pars.Cdlp < 0.075)\
& (EIS_pars.lmfit_redchi < 1E3) & (EIS_pars.Aw < 10E3) & (EIS_pars.Aw > 10E-2)\
& (EIS_pars.Qad < 1) & (EIS_pars.tau < 1E3)"
_filter += '& (EIS_pars.SampleID.str.contains("JOS1|JOS2|JOS3|JOS4|JOS5"))'
_filter += "& (EIS_pars.EIS_fake == False)"
_grps = ["Model_EEC", "Gas", "lmfit_var_names"][0:2]
EIS_pars = self.EIS_pars
best_models = (
EIS_pars.loc[eval(_filter)]
.dropna(axis=0, subset=[_err_type])
.groupby(_grps)[_err_type]
.agg(["count", "mean", "std"])
.sort_values(["Gas", "mean"], ascending=True)
)
print(best_models)
keep_models = (
best_models.loc[(best_models["count"] > 5) & (best_models["std"] > 0)]
.index.get_level_values(0)
.unique()
)
EIS_pars = EIS_pars.loc[EIS_pars.Model_EEC.isin(keep_models)]
if hasattr(EIS_pars, "best_mod_name"):
# EIS_best_mods = EIS_pars.loc[EIS_pars.Model_EEC_name.isin([i for i in EIS_pars.best_mod_name.unique() if not pd.isna(i)])]
EIS_best_mods = EIS_pars.loc[
EIS_pars.index.isin(
[i for i in EIS_pars.best_mod_n.unique() if not pd.isna(i)]
)
]
self.EIS_pars_best_mods = EIS_best_mods
_agg = (
EIS_best_mods.dropna(subset=[_err_type])
.groupby(_grps + ["E_RHE"])[_err_type]
.agg(["count", "mean", "std"])
)
_agg_best = _agg.loc[_agg["count"] > 3].sort_values(
["Gas", "E_RHE", "mean"], ascending=True
)
def _extra_plotting(self):
if hasattr(self, "EIS_pars_best_mods"):
self.EIS_pars_best_mods.query("pH < 15").plot(
y="Qad",
x="E_RHE",
c="pH",
colormap="rainbow_r",
kind="scatter",
ylim=(0, 0.05),
)
self.EIS_pars_best_mods.query("pH < 15").plot(
y="Rs",
x="E_RHE",
c="pH",
colormap="rainbow_r",
kind="scatter",
ylim=(0, 80),
)
self.EIS_pars_best_mods.query("pH < 15").plot(
y="Rs",
x="R_ion",
c="E_RHE",
colormap="rainbow_r",
kind="scatter",
ylim=(0, 80),
xlim=(0.1, 2e3),
logx=True,
)
def _testing():
t2 = ORR_LoadPars(reload=True, reload_raw=True)
tf2 = ORR_LoadPars(reload=False, reload_raw=False)
t2._reload_raw
self = tf2
self.load_delegator()
self.make_raw_pars_from_scratch()
class ORR_LoadPars(BaseLoadPars):
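    """Loader for ORR parameters: walks the ORR_v{version} folders and keeps
    only the file types listed in `read_types` (ORR_pars, KL_pars)."""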
read_types = ["ORR_pars", "KL_pars"]
def __init__(
self,
EC_index=pd.DataFrame(),
SampleCodes=pd.DataFrame(),
exp_type="ORR_pars",
BRUTE_out=False,
**kws,
):
self.BRUTE_out = BRUTE_out
super().__init__(
EC_index=EC_index, SampleCodes=SampleCodes, exp_type=exp_type, **kws
)
def read_in_pars_files(self, _genlist):
# _ps = Path(d).rglob(f'*_pars_v{FileOperations.version}.xlsx' )
while True:
try:
i = next(_genlist)
# _source_mtime = dt.datetime.fromtimestamp(i.stat().st_mtime)
# _delta_mtime = dt.datetime.now() - _source_mtime
_i_stem = i.stem
_pparts = i.parent.parts
if "KL" == _pparts[-1]:
if _i_stem.startswith("KL_"):
_type = "KL_data"
else:
_type = "KL_unknown"
elif "RingDisk" == _pparts[-1]:
_type = "ORR_ringdisk"
elif "TAFEL" == _pparts[-1]:
_type = "Tafel"
else:
if _i_stem.startswith("ORR_pars"):
_type = "ORR_pars"
elif _i_stem.startswith("KL_pars"):
_type = "KL_pars"
elif _i_stem.startswith("O2_ORR") and _i_stem.endswith(
f"_RRDE_v{FileOperations.version}"
):
_type = "ORR_RRDE"
else:
_type = "O2_ORR_unknown"
_meta = self.get_source_meta(i)
_meta.update({"source_type": _type})
if _type in self.read_types:
_pp = pd.read_excel(i, index_col=[0])
_pp = FileOperations.ChangeRoot_DF(_pp, [], coltype="string")
_pp = _pp.assign(**_meta)
else:
_pp = pd.DataFrame(_meta, index=[0])
# _meta.update({'DF' : _pp})
yield _pp
except StopIteration:
return "all done"
print("gen empty")
@func_timer_decorator
def make_raw_pars_from_scratch(self):
_logger.info(
f'Reloading raw extra steps "{self.__class__.__name__}" {self.exp_type}'
)
self.reload_raw_df_delegator()
if hasattr(self, "_raw_finish_edit_columns"):
self._raw_finish_edit_columns()
self.save_daily_raw()
def search_pars_files(self, dest_dir):
return Path(dest_dir.joinpath(f"ORR_v{FileOperations.version}")).rglob("*xlsx")
def reload_raw_df(self):
_pars_RAW = getattr(self, f"{self.exp_type}_RAW")
_pars_RAW.sort_values("source_delta_mtime", inplace=True)
_pars_RAW = _pars_RAW.reset_index()
setattr(self, f"{self.exp_type}_RAW", _pars_RAW)
# self._raw_extra_steps()
_logger.info(f'Reloading "{self.__class__.__name__}" {self.exp_type}')
# self.EIS_pars_RAW = EIS_pars_RAW
def edit_raw_columns(self):
### Fixing the pars after loading...
# TODO : taking out duplicates based on time_since_run....
ORR_pars_char = getattr(self, f"{self.exp_type}_RAW")
# Load_na = ORR_pars_char.loc[(ORR_pars_char.Loading_cm2.isna()) & (ORR_pars_char.PAR_file.isna() == False)]
# if not Load_na.empty:
# Load_na_missingvalues =[(n,*GetSampleID.ink_loading_from_filename(i.PAR_file)) for n,i in Load_na.iterrows()]
# Load_na_vals = pd.DataFrame(Load_na_missingvalues).rename(columns={1 : 'Loading_name',2 : 'Loading_cm2'}).set_index([0])
# ORR_pars_char.Loading_cm2.fillna(value=Load_na_vals.Loading_cm2,inplace=True)
# # ORR_char_merge_cols = [i for i in ORR_pars.columns if i in SampleCodes.columns]
ORR_pars_char = ORR_pars_char.drop(
columns=[i for i in ORR_pars_char.columns if "Unnamed" in i]
)
if not ORR_pars_char.loc[ORR_pars_char.Loading_cm2.isna()].empty:
_loading_cols = ["Loading_cm2", "Loading_name", "Loading_date"]
ORR_pars_char = ORR_pars_char.drop(columns=_loading_cols)
ORR_pars_char = pd.merge(
ORR_pars_char,
self.EC_index[["PAR_file"] + _loading_cols],
on="PAR_file",
how="left",
)
ORR_pars_char.Loading_cm2 = ORR_pars_char.Loading_cm2.fillna(
value=0.379
) # fillna for Loading_cm2
ORR_pars_char.Loading_cm2 = ORR_pars_char.Loading_cm2.round(3)
if ORR_pars_char.postAST.dropna().empty:
ORR_pars_char = ORR_pars_char.drop(columns="postAST")
# _int = list(set(ORR_pars_char.columns).intersection(set(EC_index.columns)))
ORR_pars_char = pd.merge(
ORR_pars_char,
self.EC_index[["PAR_file", "postAST"]],
on="PAR_file",
suffixes=("", ""),
)
ORR_pars_char = make_uniform_RPM_DAC(ORR_pars_char)
setattr(self, f"{self.exp_type}", ORR_pars_char)
# def extra_stuff_delegator(self):
# try:
# self._extra_plotting()
# except Exception as e:
# _logger.info(f'{self.__class__.__name__} Extra stuff failed because {e}')
def _extra_plotting(self):
ORR_pars_char = getattr(self, f"{self.exp_type}")
for swp, swgrp in ORR_pars_char.query("(pH < 14) & (RPM_DAC > 900)").groupby(
"Sweep_Type"
):
fig, (ax1, ax2) = plt.subplots(figsize=(10, 4), ncols=2)
# plt.figure()
swgrp.plot(
y="ORR_Jkin_min_750",
x="ORR_E_onset",
c="pH",
title=f"{swp}",
kind="scatter",
logy=True,
colormap="rainbow_r",
ylim=[0.1, 50],
xlim=(0.5, 1),
ax=ax1,
)
            ax1.set_xlabel("E onset / V_RHE")
swgrp.plot(
y="ORR_Frac_H2O2_600",
x="ORR_E_onset",
c="pH",
title=f"{swp}",
kind="scatter",
logy=True,
colormap="rainbow_r",
ylim=[0.1, 100],
xlim=(0.5, 1),
ax=ax2,
)
# ax2.set_xlabel('E onset / mV_RHE')
plt.suptitle("ORR with E_onset")
plt.show()
fig, (ax1, ax2) = plt.subplots(figsize=(10, 4), ncols=2)
swgrp.plot(
y="ORR_E_onset",
x="N2_BG_lin_slope",
c="pH",
title=f"{swp}",
kind="scatter",
logy=True,
logx=True,
colormap="rainbow_r",
xlim=[0.01, 4],
ylim=(0.5, 1),
ax=ax1,
)
swgrp.plot(
y="ORR_Jkin_min_750",
x="N2_BG_lin_slope",
c="pH",
title=f"{swp}",
kind="scatter",
logy=True,
logx=True,
colormap="rainbow_r",
xlim=[0.01, 4],
ylim=(0.001, 50),
ax=ax2,
)
# ax2.set_xlabel('E onset / mV_RHE')
plt.suptitle("ORR with N2_BG lin slope")
plt.show()
plt.close()
def _N2_testing():
n2 = N2_LoadPars(reload=True, reload_raw=True)
n2r = N2_LoadPars(reload=True, reload_raw=False)
class N2_LoadPars(BaseLoadPars):
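    """Loader for N2/Cdl parameters: walks the N2_scans_v{version} folders and
    reads the Cdl_pars / Cdl_data type files."""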
def __init__(
self,
EC_index=pd.DataFrame(),
SampleCodes=pd.DataFrame(),
exp_type="",
BRUTE_out=False,
**kws,
):
self.BRUTE_out = BRUTE_out
super().__init__(
EC_index=EC_index, SampleCodes=SampleCodes, exp_type=exp_type, **kws
)
@func_timer_decorator
def make_raw_pars_from_scratch(self):
_logger.info(
f'Reloading raw extra steps "{self.__class__.__name__}" {self.exp_type}'
)
self.reload_raw_df_delegator()
if hasattr(self, "_raw_finish_edit_columns"):
self._raw_finish_edit_columns()
self.save_daily_raw()
def _old(self):
IndexOVV_N2_pars_fn = FindExpFolder("VERSASTAT").PostDir.joinpath(
"N2Cdl_pars_IndexOVV_v{0}.pkl.compress".format(FileOperations.version)
)
n2_daily = get_daily_pickle(exp_type="N2_all")
if n2_daily.get("_exists", False) and reload != True:
# Cdl_pars_char = pd.read_excel(IndexOVV_N2_pars_fn,index_col=[0])
Cdl_pars_char = pd.read_pickle(n2_daily.get("daily_path"))
Cdl_pars_char = FileOperations.ChangeRoot_DF(
Cdl_pars_char, [], coltype="string"
)
else:
# @@ Check POST_AST status from OVV and PRM
_logger.info(
f'START reloading N2_pars OVV from daily {n2_daily["today"]:%Y-%m-%d}'
)
# EC_index = ECRunOVV(load=1).index
# ['EXP_dir','Dest_dir','PAR_file','PAR_file_Ring', 'ORR_act_N2_bg','DestFile']
# EC_index = FileOperations.ChangeRoot_DF(OnlyRecentMissingOVV,[])
# OnlyRecentMissingOVV.PAR_file = OnlyRecentMissingOVV.PAR_file.astype(str)
# OnlyRecentMissingOVV['Loading_cm2'] = OnlyRecentMissingOVV['Loading_cm2'].round(3)
# SampleCodes = SampleCodesChar().load
# EC_index, SampleCodes = Load_from_Indexes.get_EC_index()
# def read_df(_par_fls, ):
# _ps = Path(d).rglob(f'*_pars_v{FileOperations.version}.xlsx' )
def search_pars_files(self, destdir):
return Path(destdir.joinpath(f"N2_scans_v{FileOperations.version}")).rglob(
"*.xlsx"
)
def read_in_pars_files(self, _genlist, read_types=["Cdl_data", "Cdl_pars"]):
while True:
try:
i = next(_genlist)
_i_stem = i.stem
_meta = self.get_source_meta(i)
if _i_stem.endswith("_BG"):
_N2_type = "BG"
else:
if _i_stem.startswith("CV_"):
_N2_type = "CV"
if _i_stem.endswith(f"_first_v{FileOperations.version}"):
_N2_type = "CV_first"
# if not 'Scan Rate' in _pp.columns:
# 'N2_CV_raw = N2_CV_raw.assign(**{'ScanRate' : [i.split(f'_v{FileOperations.version}')[0].split('_')[-1] for i in N2_CV_raw.basename.to_numpy()]})
elif _i_stem.startswith("Cdl_data_"):
_N2_type = "Cdl_data"
elif _i_stem.startswith("Cdl_pars"):
_N2_type = "Cdl_pars"
else:
_N2_type = "N2_unknown"
_meta.update({"N2_type": _N2_type})
if _N2_type in read_types:
_pp = pd.read_excel(i, index_col=[0])
_pp = FileOperations.ChangeRoot_DF(_pp, [], coltype="string")
_pp = _pp.assign(**_meta)
else:
_pp = pd.DataFrame(_meta, index=[0])
# _meta.update({'DF' : _pp})
yield _pp
except StopIteration:
return "all done"
print("gen empty")
def reload_raw_df(self):
_pars_RAW = getattr(self, f"{self.exp_type}_RAW")
if not _pars_RAW.empty:
_pars_RAW.sort_values("source_delta_mtime", inplace=True)
_pars_RAW = _pars_RAW.reset_index()
setattr(self, f"{self.exp_type}_RAW", _pars_RAW)
_logger.info(
f'Reloading "{self.__class__.__name__}" {self.exp_type} len({len(_pars_RAW)}'
)
def _old_stuff():
if n2_daily.get("_raw_exists", False) and use_daily is True:
N2_pars_all = pd.read_pickle(n2_daily.get("daily_path_RAW"))
elif n2_daily.get("daily_options_RAW", False) and use_daily is True:
if n2_daily.get("daily_options_RAW")[-1]:
N2_pars_all = pd.read_pickle(n2_daily.get("daily_options_RAW")[-1])
else: # Construct new N2 pars ovv from reading in files
N2_OVV = EC_index.loc[EC_index.PAR_exp == "N2_act"]
_par_files = [
list(Path(d.joinpath("N2_scans_v30")).rglob("*.xlsx"))
for d in N2_OVV.Dest_dir.unique()
]
_par_fls = (a for i in _par_files for a in i) # if 'EIS' in a.name)
_par_reads = read_df(_par_fls, read_types=["Cdl_data", "Cdl_pars"])
N2_pars_all = pd.concat([i["DF"] for i in _par_reads], sort=False)
for n, gr in N2_pars_all.groupby("PAR_file"):
print(
n,
f'\nSamples: {", ".join([str(i) for i in gr.SampleID.unique()])}',
",".join(gr.N2_type.unique()),
)
N2_pars_all, _missing_index = Load_from_Indexes.check_missing_ECindex(
EC_index, N2_pars_all, clean_up=True
)
N2_pars_all.to_pickle(n2_daily["daily_path_RAW"])
    def _extra_pivot_CV(self):
        N2_pars_all = getattr(self, f"{self.exp_type}_RAW")
        N2_type_grps = N2_pars_all.groupby("N2_type")
if "CV" in N2_type_grps.groups.keys():
# N2 CVs TODO add Scan Rate column
N2_CV_raw = N2_type_grps.get_group("CV").dropna(axis=1, how="all")
# N2_CV_raw.plot(x=EvRHE,y='jmAcm-2')
N2_CV_pivot_SR_lst = []
for PF, PFgr in N2_CV_raw.groupby("PAR_file"):
# PF ,PFgr
for swp, swgrp in PFgr.groupby("Sweep_Type"):
# swp, swgrp
# swgrp.plot(x=EvRHE,y='jmAcm-2')
# E_T_idx = pd.MultiIndex.from_tuples(zip(swgrp['Elapsed Time(s)'].to_numpy(),swgrp[EvRHE].to_numpy()),names=['Elapsed_Time_s',EvRHE])
# swgrp.index = E_T_idx
# {n : len(gr) for n,gr in swgrp.groupby('Segment #')}
pvt = swgrp.pivot(
index="Elapsed Time(s)",
columns="ScanRate_mVs",
values=[EvRHE, "jmAcm-2", "Segment #"],
)
# pvt = swgrp.pivot(index=EvRHE,columns='ScanRate_mVs',values='jmAcm-2')
pvt.columns = pd.MultiIndex.from_tuples(
[(f"{i[0]}_{int(i[1])}", i[1]) for i in pvt.columns]
)
# pvt.rename(columns=pd.MultiIndex.from_tuples([(f'{i[0]}_{int(i[1])}', i[1]) for i in pvt.columns],names=['data','ScanRate_mVs']),inplace=True)
indx = pd.MultiIndex.from_tuples(
zip(repeat(PF), repeat(swp), pvt.index),
names=["PAR_file", "Sweep_Type", EvRHE],
)
pvt.index = indx
N2_CV_pivot_SR_lst.append(pvt)
# for sr, srgrp in PFgr.groupby('ScanRate_mVs'):
# SR = int(sr)
N2_CV_pivot_SR = pd.concat(N2_CV_pivot_SR_lst, sort=False)
# N2Cdl_pars_index = N2_grps.groupby('N2_type').get_group('Cdl_pars')
# N2Cdl_pars_files = [Path(i) for i in N2Cdl_pars_index['SourceFilename'].unique() if re.search('(?i)(_pars|_v20)',Path(i).stem) and Path(i).exists()]
# cdl = pd.read_excel(N2Cdl_pars_files[0],index_col=[0])
# N2Cdl_pars.rename(columns={'Filename' : 'PAR_file'})
# EPtest = N2Cdl_pars_index.loc[no_match] # a slice for testing purpose
# pd.merge(N2Cdl_pars_raw,N2_CV_index[['PAR_file','DestFile']],on='PAR_file',how='left')
# N2Cdl_pars_raw = N2_type_grps.get_group('Cdl_pars').dropna(axis=1,how='all')
# N2Cdl_data_index = postOVVout.groupby('Type_output').get_group('N2_Cdl_data')
# N2_CV_index = postOVVout.groupby('Type_output').get_group('N2_CV')
# lst, no_match, non_exist = [],[],[]
# for n,r in N2Cdl_pars_raw.iterrows():
# Cdl_data_file = N2Cdl_data_index.loc[N2Cdl_data_index.PAR_file == r.PAR_file].DestFile.unique()
# CV_files = N2_CV_index.loc[N2_CV_index.PAR_file == r.PAR_file].DestFile.unique()
# lst.append([set(Cdl_data_file),set(CV_files)])
# if len(N2Cdl_pars_raw) == len(lst):
# N2Cdl_pars_raw = N2Cdl_pars_raw.assign(**{'Cdl_data_file' : [i[0] for i in lst], 'Cdl_CV_data_files' : [i[1] for i in lst]})
# Cdl_pars = pd.concat([i for i in lst],sort=False,ignore_index=True)
def edit_raw_columns(self):
N2Cdl_pars_raw = getattr(self, f"{self.exp_type}_RAW")
N2_type_grps = N2Cdl_pars_raw.groupby("N2_type")
N2Cdl_pars_raw = N2_type_grps.get_group("Cdl_pars").dropna(axis=1, how="all")
N2Cdl_pars_raw.drop_duplicates(
subset=N2Cdl_pars_raw.columns[0:19], keep="first", inplace=True
)
N2Cdl_pars_raw = FileOperations.ChangeRoot_DF(
N2Cdl_pars_raw, [], coltype="string"
)
Cdl_pars = post_helper.make_uniform_EvRHE(N2Cdl_pars_raw)
Cdl_pars.drop_duplicates(subset=Cdl_pars.columns[0:19], inplace=True)
# Cdl_pars_merge_cols = [i for i in Cdl_pars.columns if i in SampleCodes.columns and not 'Unnamed' in i]
# Cdl_pars_char = pd.merge(Cdl_pars,SampleCodes,on=Cdl_pars_merge_cols,how='left')
# Cdl_pars_char.drop_duplicates(subset=Cdl_pars_char.columns[0:19],inplace=True)
_int = list(set(Cdl_pars.columns).intersection(set(self.EC_index.columns)))
if Cdl_pars.postAST.dropna().empty and len(self.EC_index.columns) != len(_int):
Cdl_pars = Cdl_pars.drop(columns="postAST")
# _int = list(set(Cdl_pars_char.columns).intersection(set(EC_index.columns)))
Cdl_pars = pd.merge(
Cdl_pars,
self.EC_index[["PAR_file", "postAST"]],
on="PAR_file",
suffixes=("", ""),
)
Cdl_pars = Load_from_Indexes.add_missing_ECindex_cols(self.EC_index, Cdl_pars)
setattr(self, f"{self.exp_type}", Cdl_pars)
def _extra_xls_out(self):
if xls_out:
new_N2_pars_char_target = FileOperations.CompareHashDFexport(
Cdl_pars_char, IndexOVV_N2_pars_fn
)
_logger.info(
"PostEC Cdl N2 CVs re-indexed and saved: {0}".format(
new_N2_pars_char_target
)
)
Cdl_pars_char.to_pickle(IndexOVV_N2_pars_fn)
    def _extra_plotting(self):
        Cdl_pars_char = getattr(self, f"{self.exp_type}")
        extra_plotting = False  # assumed default; the original code referenced an undefined flag
        try:
Cdl_pars_char.query('(Sweep_Type_N2 == "cathodic") & (pH < 7)').plot(
y="Cdl",
x="E_RHE",
kind="scatter",
ylim=(0, 0.08),
title="checking plot: Cdl in acid",
)
# Cdl_pars_char.query('(Sweep_Type_N2 == "cathodic") & (pH < 7)').groupby('BET_cat_agg').plot(y='Cdl',x='E_RHE',colormap='viridis',kind='scatter',ylim=(0,0.08),title='Cdl in acid')
if extra_plotting:
Cdl_pars_char.query('(Sweep_Type_N2 == "cathodic") & (pH > 7)').plot(
y="Cdl",
x="E_RHE",
c="BET_cat_agg",
colormap="viridis",
kind="scatter",
ylim=(0, 0.03),
title="Cdl in alkaline",
)
alkCdl = Cdl_pars_char.query('(Sweep_Type_N2 == "cathodic") & (pH > 7)')
acidCdl = Cdl_pars_char.query(
'(Sweep_Type_N2 == "cathodic") & (pH < 7)'
)
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
# ax.plot_trisurf(alkCdl.E_RHE,alkCdl.Cdl,alkCdl.BET_cat_agg,cmap=cm.viridis)
Cdl_atE = Cdl_pars_char.loc[
(Cdl_pars_char.Sweep_Type_N2 == "cathodic")
& (np.isclose(Cdl_pars_char["E_RHE"], 0.5, atol=0.02))
]
fig, ax = plt.subplots()
for n, Ogr in Cdl_atE.query(
'(Sweep_Type_N2 == "cathodic") & (pH < 7)'
).groupby("postAST"):
c_set = "g" if n == "no" else "r"
Ogr.plot(
x="BET_cat_agg",
y="Cdl",
s=50,
c=c_set,
kind="scatter",
label=n,
title="N2 Cdl vs BET in acid",
ax=ax,
ylim=(0, 50e-3),
)
fig, ax = plt.subplots()
for n, Ogr in Cdl_atE.query(
'(Sweep_Type_N2 == "cathodic") & (pH > 7)'
).groupby("postAST"):
c_set = "g" if n == "no" else "r"
Ogr.plot(
x="BET_cat_agg",
y="Cdl",
s=50,
c=c_set,
kind="scatter",
label=n,
title="N2 Cdl vs BET in alk",
ax=ax,
ylim=(0, 50e-3),
)
except Exception as e:
_logger.warning(f"PostEC Cdl N2 CVs extra plotting fail:\n{e}")
class CollectPostOVV:
"""Loops over all index files and merges them with the RunOVV"""
    def __init__(self):
        pass
@staticmethod
def LoadPostOVV(reload=False):
PostDestDir = FindExpFolder("VERSASTAT").DestDir.joinpath("PostEC")
SampleCodes = FindExpFolder().LoadSampleCode()
# CS_parts_PDD = FileOperations.find_CS_parts(PostDestDir)
if reload == True:
postOVVout = CollectPostOVV.LoadIndexes(reload=True)
else:
try:
postOVVout = CollectPostOVV.LoadIndexes(reload=False)
except Exception as e:
logging.warning(
"CollectPostOVV no Indexes available: {0}. Using postEC_Organized".format(
e
)
)
postOVVout = pd.read_excel(
PostDestDir.joinpath("postEC_Organized.xlsx"), index_col=[0]
)
# pd.read_excel(PostDestDir.joinpath('SampleCodeLst.xlsx'))
# CS_parts_pOVV = FileOperations.find_CS_parts(postOVVout.Exp_dir.iloc[0])
# if CS_parts_PDD[0] != CS_parts_pOVV[0]:
# chLst = [CS_parts_PDD[0].joinpath(FileOperations.find_CS_parts(i)[1]) for i in postOVVout.SourceFilename.values]
# postOVVout['SourceFilename'] = chLst
# else:
# pass
postSample = pd.merge(postOVVout, SampleCodes, on="SampleID", how="left")
print("Types:", " , ".join([str(i) for i in postSample.Type_output.unique()]))
postSample.PAR_file = postSample.PAR_file.astype(str)
postSample = FileOperations.ChangeRoot_DF(
postSample,
[
"EXP_dir",
"Dest_dir",
"PAR_file",
"PAR_file_Ring",
"ORR_act_N2_bg",
"DestFile",
"SourceFilename",
],
)
return postSample
# def RunFolderCopy(serie):
# postOVVlst = [outLst.append(PostEC.FromListgrp(n,gr.EXP_dir.unique()[0])) for n,gr in serie.groupby(by=['Dest_dir'])]
# return postOVVlst
@staticmethod
def LoadIndexes(reload=False):
IndexOVV_fn = FindExpFolder("VERSASTAT").DestDir.joinpath(
"IndexOVV_v{0}.xlsx".format(FileOperations.version)
)
if IndexOVV_fn.exists() and not reload:
Index_merged = pd.read_excel(IndexOVV_fn, index_col=[0])
Index_merged = FileOperations.ChangeRoot_DF(
Index_merged,
[
"EXP_dir",
"Dest_dir",
"PAR_file",
"PAR_file_Ring",
"ORR_act_N2_bg",
"DestFile",
"SourceFilename",
],
)
_logger.info("PostEC loaded IndexOVV from recent: {0}".format(IndexOVV_fn))
else:
_logger.info(
"PostEC reloading IndexOVV from Index files and Exp dir files!!"
)
OnlyRecentMissingOVV = ECRunOVV(load=1).index
# ['EXP_dir','Dest_dir','PAR_file','PAR_file_Ring', 'ORR_act_N2_bg','DestFile']
OnlyRecentMissingOVV = FileOperations.ChangeRoot_DF(
OnlyRecentMissingOVV, []
)
OnlyRecentMissingOVV.PAR_file = OnlyRecentMissingOVV.PAR_file.astype(str)
# if index_source == 'ExpDirs':
idx_files = [
list(Path(i).rglob("**/*index*.xlsx"))
for i in OnlyRecentMissingOVV.Dest_dir.unique()
if list(Path(i).rglob("**/*index.xlsx"))
]
# for i in OnlyRecentMissingOVV.Dest_dir.unique():
# [idx_files.append([a for a in a if a]) for a in [(Path(i).rglob('index.xlsx')) for i in OnlyRecentMissingOVV.Dest_dir.unique()]]
# idx_dir = FindExpFolder('VERSASTAT').IndexDir
# idx_files = idx_dir.rglob('*.xlsx')
# subset=['PAR_file','DestFile','Type_output','Script_run_date']
idx_lst = set([a for i in idx_files for a in i])
idx_mtime = [
(i, (dt.datetime.now() - dt.datetime.fromtimestamp(i.stat().st_mtime)))
for i in idx_lst
]
# print(f'len {len(idx_lst)} and set {len(set(idx_lst))}')
alst = (
[]
) # Alternative = pd.concat([[pd.read_excel(c,index_col=[0]) for c in a ] for b in idx_files],sort=False,ignore_index=True)
for idxfp in idx_lst:
df = pd.read_excel(idxfp, index_col=[0])
df["IndexSource"] = idxfp
alst.append(df)
Index_from_expdirs_all = pd.concat(
[i for i in alst], sort=False, ignore_index=True
)
Index_from_expdirs_all.sort_values(
"Script_run_date", ascending=False, inplace=True
)
Index_from_expdirs = Index_from_expdirs_all.drop_duplicates(keep="first")
Index_from_expdirs = FileOperations.ChangeRoot_DF(Index_from_expdirs, [])
idx_exp_tDelta = [
(n, pd.to_datetime(dt.datetime.now()) - i["Script_run_date"])
for n, i in Index_from_expdirs.iterrows()
]
Index_from_expdirs = Index_from_expdirs.assign(
**{
"Source": "ExpDirs",
"Time_since_run": [pd.to_timedelta(i[1]) for i in idx_exp_tDelta],
}
)
# Index_from_expdirs['Time_since_run'] = [pd.to_timedelta(pd.to_datetime(datetime.now())-i) for i in Index_from_expdirs['Script_run_date'].values]
# limit = pd.to_timedelta('7h')
# ['Time_since_run'] = [pd.to_timedelta(pd.to_datetime(datetime.now())-i) for i in Index['Script_run_date'].values]
# Index = Index.loc[Index['Time_since_run'] < limit]
# Index = Index.iloc[dups].loc[Index['Time_since_run'] < limit]
# else:
# dups.append(gr.Time_since_run.idxmin())
# 1elif index_source == 'IndexDir':
IndexDir_idxfiles = list(
FindExpFolder("VERSASTAT").IndexDir.rglob("*.xlsx")
)
Index_from_idxdir_all = pd.concat(
[
pd.read_excel(i, index_col=[0]).assign(IndexSource=i)
for i in IndexDir_idxfiles
],
sort=False,
ignore_index=True,
)
Index_from_idxdir_all.sort_values(
"Script_run_date", ascending=False, inplace=True
)
Index_from_idxdir = Index_from_idxdir_all.drop_duplicates(keep="first")
Index_from_idxdir = FileOperations.ChangeRoot_DF(Index_from_idxdir, [])
Index_from_idxdir = Index_from_idxdir.assign(**{"Source": "IndexDir"})
Index_from_idxdir["Time_since_run"] = [
pd.to_timedelta(pd.to_datetime(dt.datetime.now()) - i)
for i in Index_from_idxdir["Script_run_date"].values
]
# dup_idxdir = Index_from_idxdir.loc[Index_from_idxdir.DestFile.duplicated() == True]
dups_date, singles, others, unused_dups = [], [], [], []
for n, gr in Index_from_idxdir.groupby(
["PAR_file", "DestFile", "Type_output"]
):
# Indexes.groupby(['PAR_file','DestFile','Type_output','ScanRate','Segment']):
if len(gr) > 1:
dgr = gr
# print(n,gr.Time_since_run.unique())
dups_date.append(gr.Time_since_run.idxmin())
unused_dups.append(
list(set(gr.index) - {gr.Time_since_run.idxmin()})
)
elif len(gr) == 1:
singles.append(gr.index[0])
else:
others.append(gr.index)
dup_fltr_idxdir = Index_from_idxdir.loc[singles + dups_date]
# Indexes = pd.merge(Index_from_expdirs,Index_from_idxdir, on=['PAR_file','DestFile','Type_output','ScanRate','Segment','Sweep_Type','Source'])
Indexes = pd.concat([Index_from_expdirs, dup_fltr_idxdir], sort=False)
# Indexes['Time_since_run'] = [pd.to_timedelta(pd.to_datetime(datetime.now())-i) for i in Indexes['Script_run_date'].values]
Indexes = Indexes.dropna(
subset=["PAR_file", "DestFile", "Type_output"]
).reset_index()
dups_date, singles, others = [], [], []
Idxgr = Indexes.groupby(["PAR_file", "DestFile", "Type_output"])
for n, gr in Idxgr:
# Indexes.groupby(['PAR_file','DestFile','Type_output','ScanRate','Segment']):
if len(gr) > 1:
dgr = gr
idxmin = gr.Time_since_run.idxmin()
# print(n,gr.Time_since_run.unique())
dups_date.append([idxmin, gr.loc[idxmin, "Source"]])
elif len(gr) == 1:
singles.append(gr.index[0])
else:
others.append(gr.index)
# for n2,gr2 in OnlyRecentMissingOVV.groupby('PAR_file'):
# if len(gr2) > 1:
# dgr2 = gr2
# Index = Index.iloc[dups].loc[Index['Time_since_run'] < limit]
Index = Indexes.loc[singles + [i[0] for i in dups_date]].dropna(
subset=["DestFile"]
)
# for a in Index.DestFile.values:
# try: Path(a).is_file()
# except: print(a)
# if not any([Path(i).exists() for i in Index.DestFile.values]):
# Index = FileOperations.ChangeRoot_DF(Index,['PAR_file','DestFile']) 'EXP_dir','Dest_dir','PAR_file','PAR_file_Ring','ORR_act_N2_bg','DestFile','SourceFilename'
Index = FileOperations.ChangeRoot_DF(Index, [])
Index = Index.assign(
**{
"Type_Exp": Index["Type_output"],
"SourceFilename": [Path(str(i)) for i in Index["DestFile"].values],
}
)
# Index['Type_Exp'] = Index['Type_output']
# Index['SourceFilename'] = [Path(str(i)) for i in Index['DestFile'].values]
Index.PAR_file = Index.PAR_file.astype(str)
Index_undup = Index.loc[
(
Index.duplicated(
subset=[
"PAR_file",
"DestFile",
"Type_output",
"Time_since_run",
"Source",
]
)
== False
)
]
idx_merge_cols = [
i
for i in Index_undup.columns
if i in OnlyRecentMissingOVV.columns and not "Segment" in i
]
Index_merged = pd.merge(
Index_undup, OnlyRecentMissingOVV, on="PAR_file", how="left"
)
Index_merged.PAR_file = [
Path(str(i)) for i in Index_merged["PAR_file"].values
]
new_IndexOVV_target = FileOperations.CompareHashDFexport(
Index_merged, IndexOVV_fn
)
try:
_logger.info(
"PostEC re-indexed and saved: {0}".format(new_IndexOVV_target)
)
except:
print("no log")
return Index_merged
@staticmethod
def MatchPostASTs(postOVVout):
# postOVVout.postAST.unique()
# [(n,len(gr)) for n,gr in postOVVout.groupby('postAST')]
faillst, fail_index_gr = [], []
matchAST_lst, non_uniq_lst = [], []
for nAST, ASTgr in postOVVout.query(
'(postAST != "no") & (postAST != "postORR")'
).groupby(["postAST", "PAR_date", "PAR_file"]):
nAST, ASTgr
# for nDT,grDT in ASTgr.groupby(')
if ASTgr.PAR_file.nunique() == 1 and ASTgr.Source.nunique() > 1:
ASTgr_grSource = ASTgr.groupby("Source")
ASTgr_info = [
(n, len(gr), gr.Time_since_run.mean()) for n, gr in ASTgr_grSource
]
if len(set([i[1] for i in ASTgr_info])) == 1:
take_source = ASTgr_info[np.argmin([i[2] for i in ASTgr_info])][0]
ASTgr = ASTgr_grSource.get_group(take_source)
fail_index_source_gr = ASTgr_grSource.get_group(
ASTgr_info[np.argmax([i[2] for i in ASTgr_info])][0]
)
fail_index_gr.append(fail_index_source_gr)
EC_exp_uniq = [
(i, ASTgr[i].unique(), ASTgr[i].nunique())
for i in [
c
for c in SampleSelection.EC_exp_cols
+ ["SampleID", "Type_exp", "PAR_file"]
if c in ASTgr.columns
]
]
EC_exp_non_uniq = [i for i in EC_exp_uniq if i[2] != 1]
if EC_exp_non_uniq:
print(
"Not unique PAR_date {0},multiple: {1}".format(
nAST[1], EC_exp_non_uniq
)
)
non_uniq_lst.append([nAST, EC_exp_non_uniq, EC_exp_uniq])
faillst.append(ASTgr)
EC_exp_query = " & ".join(
[
'({0} == "{1}")'.format(i[0], i[1][0])
for i in EC_exp_uniq[1:-1] + [("postAST", ["no"])]
if not "Loading" in i[0]
]
)
past = nAST[1] - pd.to_timedelta(1, unit="D")
past_slice = postOVVout.query("(PAR_date > @past) & (PAR_date < @nAST[1])")
past_query = past_slice.query(EC_exp_query)
if past_query.query(EC_exp_query).empty:
# expand search to all OVV for similar conditions
all_query = postOVVout.query(EC_exp_query)
if not all_query.empty:
preAST = tuple(all_query.PAR_file.unique())
else:
preAST = "no-preAST"
else:
# find previous preAST measurments
preAST = tuple(past_query.PAR_file.unique())
matchAST_lst.append(list(nAST) + [preAST])
if fail_index_gr:
fail_index_filter = pd.concat(fail_index_gr)
postOVVout = postOVVout.loc[
~postOVVout.index.isin(fail_index_filter.index), :
]
non_uniq = pd.DataFrame(non_uniq_lst)
if faillst:
fails = pd.concat(faillst)
matchAST = pd.DataFrame(
matchAST_lst, columns=["postAST", "PAR_date", "PAR_file", "preAST"]
)
postOVVout = pd.merge(
postOVVout, matchAST[["PAR_file", "preAST"]], on="PAR_file", how="left"
)
return postOVVout
# ASTgr.SampleID.unique()
@staticmethod
def MatchECconditions(OVV_df):
# postOVVout.postAST.unique()
# [(n,len(gr)) for n,gr in postOVVout.groupby('postAST')]
matchAST_lst = []
# 'DW16_2018-03-06 00:00:00_no_0.1MHClO4+10mMH2O2_1.0_0.379'
OVV_df["PAR_date_day"] = [
dt.datetime.strftime(i, format="%Y-%m-%d")
for i in OVV_df.PAR_date.fillna(dt.date(1970, 12, 12)).to_list()
]
# [pd.datetime.strftime(pd.to_datetime(i),format='%Y-%m-%d') for i in postOVVout.PAR_date.fillna(0).to_list()]
EC_label_cols = [
"SampleID",
"pH",
"Electrolyte",
"Loading_cm2",
"postAST",
"PAR_date_day",
]
post_prev_cols = OVV_df.columns
# +[i for i in SampleSelection.EC_exp_cols if i not in ['RPM','Gas']]
for nAST, ASTgr in OVV_df.groupby(EC_label_cols):
nAST, ASTgr
# for nDT,grDT in ASTgr.groupby(')
minDT, maxDT = ASTgr.PAR_date.min(), ASTgr.PAR_date.max()
deltaDT = maxDT - minDT
# par_Day = pd.datetime.strftime(nAST[-1],format='%Y-%m-%d')
EC_exp_query = "_".join([str(i) for i in list(nAST)])
EC_exp_nodate = "_".join([str(i) for i in list(nAST)[0:-1]])
matchAST_lst.append(
pd.DataFrame(
[
(i, EC_exp_query, EC_exp_nodate, deltaDT)
for i in ASTgr.PAR_file.unique()
],
columns=["PAR_file", "ECexp", "ECuniq", "EC_deltaDT"],
)
)
EC_exp_match = pd.concat(
[i for i in matchAST_lst], ignore_index=True, sort=False
)
OVV_df = pd.merge(OVV_df, EC_exp_match, on=["PAR_file"], how="left")
print(
'Added columns: "{0}" to postOVV with len({1})'.format(
", ".join(list(set(post_prev_cols) - set(OVV_df.columns))), len(OVV_df)
)
)
return OVV_df
# ASTgr.SampleID.unique()
# merge_cols = [i for i in Index.columns if i in OnlyRecentMissingOVV.columns and not 'Segment' in i]
# p2,ovv2 = Index.set_index(merge_cols), OnlyRecentMissingOVV.set_index(merge_cols)
# merge = p2.update(ovv2)
# merge = p2.combine_first(ovv2)
# else:
# AllEIS_BoL = pd.concat([pd.read_excel(i) for i in list(PostDestDir.joinpath('EIS','0.1MH2SO4').rglob('*BoL*'))])
# AllEIS_EoL = pd.concat([pd.read_excel(i) for i in list(PostDestDir.joinpath('EIS','0.1MH2SO4').rglob('*EoL*'))])
# AllEIS_BoL = AllEIS_BoL.loc[(AllEIS_BoL['Unnamed: 0'] > 0.2901) & (AllEIS_BoL['Unnamed: 0'] < 0.301) & (AllEIS_BoL.SampleID != 'O2'),:]
# AllEIS300_EoL = AllEIS_EoL.loc[(AllEIS_EoL['Unnamed: 0'] > 0.2901) & (AllEIS_EoL['Unnamed: 0'] < 0.301) & (AllEIS_EoL.SampleID != 'O2'),:]
# .query('(EXP_date > 20181001)')
# refl = []
# for a in postOVVout.SampleID.values:
# ScodeRef = SampleCodes.loc[SampleCodes.SampleID == a,:]
# if ScodeRef.empty:
# Scode = EISovv['SampleID'].unique()[0]
# else:
# Scode = ScodeRef.Sample.values[0]
# refl.append(Scode)
# postOVVout['SampleLabel'] = refl
# return postOVVout
# for a in postOVVout.SampleID.values:
# ScodeRef = SampleCodes.loc[SampleCodes.SampleID == a,:]
# if ScodeRef.empty:
# Scode = EISovv['SampleID'].unique()[0]
# else:
# Scode = ScodeRef.Sample.values[0]
# refl.append(Scode)
# postOVVout['SampleLabel'] = refl
# postOVVout.loc[postOVVout.Type_Exp == 'EIS_Combined']
# def recently_modified(file,20):
# file_mtime = pd.to_datetime(DestFile.stat().st_mtime,unit='s')
class Load_from_Indexes:
"""This class loads the parameters of Electrochemical Data files and merge it with the Overview"""
SampleCodes = FindExpFolder().LoadSampleCode()
# EC_label_cols = ['SampleID','pH','Electrolyte','Loading_cm2','postAST','PAR_date_day']
EC_label_cols = [
"PAR_file",
"SampleID",
"postAST",
"Loading_cm2",
"Electrolyte",
"pH",
"Gas",
"RPM_DAC",
"E_RHE",
]
PostDestDir = FindExpFolder("VERSASTAT").PostDir
def __init__(self, **kwargs):
if "reload" in kwargs:
# self.postOVVout = CollectPostOVV.LoadPostOVV(kwargs['reload'])
print(
"Exp types found in overview: {0}".format(
", ".join([str(i) for i in self.postOVVout.Type_Exp.unique()])
)
)
pass
@staticmethod
def PreparePostOVV(fastload=False):
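        # Returns the post-processing overview: loads the cached pickle when fastload
        # is allowed and the cache is recent enough, otherwise rebuilds it via
        # Load_from_Indexes, matches post-AST files and EC conditions, and re-pickles it.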
postOVV_pickle_path = FindExpFolder("VERSASTAT").PostDir.joinpath(
"PostOVVout_v20_{0}.pkl.compress".format(system())
)
if postOVV_pickle_path.is_file():
tdelta = dt.datetime.now() - dt.datetime.fromtimestamp(
postOVV_pickle_path.stat().st_mtime
)
if tdelta.seconds > 600:
fastload = False
print(f"Fastload overwrite to False, {tdelta}")
if fastload == True:
try:
postOVVout = pd.read_pickle(postOVV_pickle_path, compression="xz")
return postOVVout
except Exception as e:
print("Load postOVVout from pickle error: ", e)
LoadOVV = Load_from_Indexes(reload=True)
else:
LoadOVV = Load_from_Indexes(reload=True)
postOVVout = LoadOVV.postOVVout
print("Types:", " , ".join([str(i) for i in postOVVout.Type_output.unique()]))
postOVVout.Loading_cm2 = np.round(postOVVout.Loading_cm2, 3)
postOVVout = CollectPostOVV.MatchPostASTs(postOVVout)
postOVVout = CollectPostOVV.MatchECconditions(postOVVout)
postOVVout.PAR_file = postOVVout.PAR_file.astype(str)
postOVVout["PAR_date_day"] = [
            dt.datetime.strftime(pd.to_datetime(i), format="%Y-%m-%d")
for i in postOVVout.PAR_date.fillna(0).values
]
postOVVout = FileOperations.ChangeRoot_DF(postOVVout, [], coltype="string")
postOVVout.to_pickle(postOVV_pickle_path, compression="xz")
return postOVVout
def CollectAllExpTypeOVV():
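        # Collects the parameter overviews per experiment type (EIS, HPRR, Cdl/N2,
        # HER, OER, ORR) using the prepared postOVV and the sample characterization
        # codes, and pickles the EIS and ORR results to the PostEC destination folder.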
PostDestDir = FindExpFolder("VERSASTAT").DestDir.joinpath("PostEC")
        today = dt.datetime.now().date()
postOVVout = Load_from_Indexes.PreparePostOVV(fastload=False) # len(22965)
# postOVVout.PAR_file = postOVVout.PAR_file.astype(str)
# === Loading preparation overview of Samples and merging with the data from Characterization techniques === #
SampleCodes = PostChar.SampleCodeChar()
#
Reload_set = True
logger = start_logger()
        EIS_pars = Load_from_Indexes.EIS_pars_OVV(reload=Reload_set)  # EIS_Pars2 6745, 22813
HPRR_pars = Load_from_Indexes.HPRR_pars_OVV(
postOVVout, SampleCodes, reload=Reload_set
) # HPRR 1668
Cdl_pars = Load_from_Indexes.N2_pars_OVV(reload=Reload_set) # Cdl runs 20322
Cdl_pars_catan = MergeEISandCdl.splitcol_Sweep_Cdl(Cdl_pars) # 10342
        HER_pars = Load_from_Indexes.HER_pars_OVV(reload=Reload_set)  # 2539
OER_pars = Load_from_Indexes.OER_pars_OVV(
postOVVout, SampleCodes, reload=Reload_set
) # run 1347
if list(
PostDestDir.rglob(
f"{today.year}-{today.month}-*_ORR_pars_{system()}.pkl.compress"
)
)[-1].is_file():
            ORR_pars = Load_from_Indexes.ORR_pars_OVV(reload=Reload_set)  # ORR 1908
ORR_pars.to_pickle(
PostDestDir.joinpath(
f"{today.year}-{today.month}-{today.day}_ORR_pars_{system()}.pkl.compress"
)
)
EIS_pars.to_pickle(
PostDestDir.joinpath(
f"{today.year}-{today.month}-{today.day}_EIS_pars_{system()}.pkl.compress"
)
)
# FindExpFolder().LoadSampleCode()
# SampleCodes = ExportECfromCV.SampleCodes
# SampleSelect_all = SampleSelection('*','*')
# SampleCodesChar = SampleSelect_all.Prep_EA_BET
# SampleCodes = pd.merge(SampleCodes,SampleCodesChar,how='left',on='SampleID',suffixes=('','_char')).drop_duplicates(subset=['SampleID','N_content'])
# === Start preparing pars OVV from index per Experimental type === #
# postOVVout,SampleCodes = pd.DataFrame(),pd.DataFrame()
def extraPostOVV():
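        # Exploratory helper: lists experiments present in the run index but missing
        # from the analyzed EIS/ORR output, exports those lists, and makes a few quick
        # comparison plots of Cdl/EIS/ORR parameters.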
OnlyRecentMissingOVV = run_PAR_DW.ECRunOVV(load=1).index
        # === Checking experiments from index to analyzed data === #
[
(i)
for i, gr in OnlyRecentMissingOVV.query('PAR_exp == "EIS"').groupby(
"SampleID"
)
if gr.Loading_cm2.nunique() > 1
]
[
(i)
for i, gr in postOVVout.query('PAR_exp == "EIS"').groupby("SampleID")
if gr.Loading_cm2.nunique() > 1
]
eismiss = OnlyRecentMissingOVV.loc[
OnlyRecentMissingOVV.PAR_file.isin(
[
i
for i in OnlyRecentMissingOVV.query(
'PAR_exp == "EIS"'
).PAR_file.values
if i not in postOVVout.PAR_file.values
]
)
].sort_values(
by="PAR_date",
) # 40
eismiss.to_excel(
FindExpFolder("VERSASTAT").PostDir.joinpath("OVV_EIS_missing.xlsx")
)
orrmiss = OnlyRecentMissingOVV.loc[
OnlyRecentMissingOVV.PAR_file.isin(
[
i
for i in OnlyRecentMissingOVV.query(
'PAR_exp == "ORR" & Electrode != "Pt_ring"'
).PAR_file.values
if i not in ORR_pars.PAR_file.values
]
)
].sort_values(
by="PAR_date",
) # 279
# orrmiss = OnlyRecentMissingOVV.loc[OnlyRecentMissingOVV.PAR_file.isin([i for i in OnlyRecentMissingOVV.query('PAR_exp == "ORR"').PAR_file.values if i not in ORR_pars.PAR_file.values])].sort_values(by='PAR_date',)
orrmiss.to_pickle(PostDestDir.joinpath("ORR_missing.pkl.compress"))
        SampleSelection.EC_exp_cols + ["SampleID", EvRHE]
for n, gr in Cdl_pars.groupby(
[i for i in SampleSelection.EC_exp_cols if i in Cdl_pars.columns]
):
fig, ax = plt.subplots()
for sID, sgr in gr.groupby("SampleID"):
sgr.plot(
y="Cdl",
x="Qad",
c="BET_cat_agg_x",
colormap="jet",
kind="scatter",
title="Cdl in acid",
ax=ax,
)
EIS_pars.query(SampleSelection.acid1500).query('Gas == "O2" & pH == 1 ').plot(
x="BET_cat_agg", y="Rct", kind="scatter", c="N_content", colormap="viridis"
)
mcls = [i for i in EIS_pars.columns if i in Cdl_pars.dropna(axis=1).columns]
mcls2 = [
i
for i in SampleSelection.EC_exp_cols + ["SampleID", "E_RHE"]
if i in EIS_pars.columns and i in Cdl_pars.dropna(axis=1).columns
]
mcls3 = [
i
for i in SampleSelection.EC_exp_cols + ["SampleID", "E_RHE"]
if i in EIS_pars.columns
and i in Cdl_pars.dropna(axis=1).columns
and i in ORR_pars_char.columns
]
[
(i, EIS_pars[i].dtypes, Cdl_pars[i].dtypes)
for i in mcls
if EIS_pars[i].dtypes != Cdl_pars[i].dtypes
]
EIS_Cdl = pd.merge(EIS_pars, Cdl_pars, on=mcls2, how="outer")
EIS_Cdl_ORR = pd.merge(EIS_Cdl, ORR_pars_char, on=mcls3, how="outer")
# [['E_RHE','Cdl','Cdlp']]
ECdl = EIS_Cdl.dropna(how="any", axis=0, subset=["Cdl", "Cdlp"])
        ECdl_ORR = EIS_Cdl_ORR.dropna(how="any", axis=0, subset=["Cdl", "Cdlp"])
test1_alk = ECdl.query(
'(pH > 7) & (pH < 15) & (E_RHE > 0.494) & (E_RHE < 0.516) & (Sweep_Type_N2 == "cathodic")'
)
test1_acid = ECdl.query(
'(pH < 7) & (E_RHE > 0.494) & (E_RHE < 0.516) & (Sweep_Type_N2 == "cathodic")'
)
test1_alk.plot(
y="Cdl",
x="Qad",
c="BET_cat_agg_x",
colormap="jet",
kind="scatter",
title="Cdl in alkaline",
)
test1_alk.plot(
y="Cdl_corr",
x="Rct",
c="BET_cat_agg_x",
colormap="jet",
kind="scatter",
title="Cdl in alkaline",
)
test1_acid.plot(
y="Cdl",
x="Qad",
c="BET_cat_agg_x",
colormap="jet",
kind="scatter",
title="Cdl in acid",
)
test1_acid.plot(
y="Cdl",
x="Rct_kin",
c="BET_cat_agg_x",
colormap="jet",
kind="scatter",
title="Cdl in acid",
)
# HPRR_pars = pd.merge(HPRR_pars,postOVVout,on='PAR_file',how='left')
# print('Leftover SampleIDs: {0}'.format(set(HPRR_pars.SampleID.unique()) - set(SampleCodes.SampleID.unique())))
# HPRR_pars = pd.merge(HPRR_pars,SampleCodes,on='SampleID',how='left')
# @@ Check POST_AST status from OVV and PRM...
print(
"Leftover SampleIDs: {0}".format(
set(ORR_pars.SampleID.unique()) - set(SampleCodes.SampleID.unique())
)
)
ORR_pars = pd.merge(ORR_pars, SampleCodes, on="SampleID", how="left")
return HPRR_pars_ovv, EIS_pars_ovv
def get_EC_index():
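        # Loads the EC run index and the sample codes; normalizes file-path roots,
        # casts PAR_file to str and rounds the loading values.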
EC_index = ECRunOVV(load=1).EC_index
# ['EXP_dir','Dest_dir','PAR_file','PAR_file_Ring', 'ORR_act_N2_bg','DestFile']
EC_index = FileOperations.ChangeRoot_DF(EC_index, [])
EC_index.PAR_file = EC_index.PAR_file.astype(str)
EC_index["Loading_cm2"] = EC_index["Loading_cm2"].round(3)
SampleCodes = FindExpFolder().LoadSampleCode()
# SampleCodesChar().load
return EC_index, SampleCodes
@staticmethod
def check_missing_ECindex(OnlyRecentMissingOVV, DF_pars, clean_up=False):
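        # Splits DF_pars into rows whose PAR_file appears in the index and rows that
        # do not; the source files of the latter can optionally be deleted via CleanUpCrew.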
not_in_index = DF_pars.loc[
~DF_pars.PAR_file.isin(OnlyRecentMissingOVV.PAR_file.values)
]
CleanUpCrew(list_of_files=not_in_index.sourceFilename.unique(), delete=clean_up)
return (
DF_pars.loc[DF_pars.PAR_file.isin(OnlyRecentMissingOVV.PAR_file.values)],
not_in_index,
)
@staticmethod
def add_missing_ECindex_cols(EC_index, DF):
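        # Left-merges any EC_index columns that DF is still missing, keyed on PAR_file.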
if list(EC_index.columns.difference(DF.columns)):
DF = pd.merge(
DF,
EC_index[["PAR_file"] + list(EC_index.columns.difference(DF.columns))],
on="PAR_file",
how="left",
)
return DF
@staticmethod
def IndexPars_CB_paper():
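        # Slices every experiment-type pars set down to the CB-paper sample series,
        # exports the HPRR/ORR/Cdl subsets to Excel and builds a cathodic/anodic
        # merged Cdl frame with a Cdl_sum column.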
postOVVout, SampleCodes = pd.DataFrame(), pd.DataFrame()
PostECddSeries = FindExpFolder("VERSASTAT").DestDir.joinpath(
"PostEC/{0}".format(SampleSelection.Series_CB_paper["name"])
)
PostECddSeries.mkdir(exist_ok=True, parents=True)
        EIS_pars = Load_from_Indexes.EIS_pars_OVV(reload=False)  # EIS_Pars2
HPRR_pars = Load_from_Indexes.HPRR_pars_OVV(
postOVVout, SampleCodes, reload=False
) # HPRR
        ORR_pars = Load_from_Indexes.ORR_pars_OVV(reload=False)  # ORR
Cdl_pars = Load_from_Indexes.N2_pars_OVV(reload=False)
        HER_pars = Load_from_Indexes.HER_pars_OVV(reload=False)
OER_pars = Load_from_Indexes.OER_pars_OVV(postOVVout, SampleCodes, reload=False)
CBsamples = SampleSelection.Series_CB_paper["sIDs"]
EIS_CB_paper = EIS_pars.loc[EIS_pars.SampleID.isin(CBsamples)] # 7644
HPRR_CB_paper = HPRR_pars.loc[HPRR_pars.SampleID.isin(CBsamples)]
HPRR_CB_paper.to_excel(PostECddSeries.joinpath("HPRR_CB_paper.xlsx"))
ORR_CB_paper = ORR_pars.loc[ORR_pars.SampleID.isin(CBsamples)]
ORR_CB_paper.to_excel(PostECddSeries.joinpath("ORR_CB_paper.xlsx"))
Cdl_CB_paper = Cdl_pars.loc[Cdl_pars.SampleID.isin(CBsamples)]
Cdl_CB_paper.to_excel(PostECddSeries.joinpath("Cdl_CB_paper.xlsx"))
HER_CB_paper = HER_pars.loc[HER_pars.SampleID.isin(CBsamples)]
OER_CB_paper = OER_pars.loc[OER_pars.SampleID.isin(CBsamples)]
Cdl_CB_cath, Cdl_CB_an = Cdl_CB_paper.query(
'Sweep_Type_N2 == "cathodic"'
), Cdl_CB_paper.query('Sweep_Type_N2 == "anodic"')
merge_cols_catan = [i for i in Cdl_CB_cath.columns if i in Cdl_CB_an.columns]
Cdl_CB_catan = pd.merge(
Cdl_CB_cath,
Cdl_CB_an,
on=[i for i in merge_cols_catan if i not in SampleSelection.EC_N2Cdl_cols],
how="left",
suffixes=["_cat", "_an"],
)
Cdl_CB_catan["Cdl_sum"] = Cdl_CB_catan["Cdl_an"] + Cdl_CB_catan["Cdl_cat"]
return (
EIS_CB_paper,
HPRR_CB_paper,
ORR_CB_paper,
Cdl_CB_paper,
HER_CB_paper,
OER_CB_paper,
)
@staticmethod
def IndexPars_Porph_SiO2():
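        # Slices the pars down to the Porph_SiO2 sample series; note that currently
        # only the ORR subset is returned.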
postOVVout, SampleCodes = pd.DataFrame(), pd.DataFrame()
serie = SampleSelection.Series_Porhp_SiO2["sIDslice"]
        EIS_pars = Load_from_Indexes.EIS_pars_OVV(reload=False)  # EIS_Pars2
Cdl_pars = Load_from_Indexes.N2_pars_OVV(reload=False)
EIS_Porph_SiO2 = EIS_pars.loc[EIS_pars.SampleID.isin(serie)]
Cdl_Porph_SiO2 = Cdl_pars.loc[Cdl_pars.SampleID.isin(serie)]
Cdl_Porph_SiO2_cath, Cdl_Porph_SiO2_an = Cdl_Porph_SiO2.query(
'Sweep_Type_N2 == "cathodic"'
), Cdl_Porph_SiO2.query('Sweep_Type_N2 == "anodic"')
HPRR_pars_char = Load_from_Indexes.HPRR_pars_OVV(
postOVVout, SampleCodes, reload=False
) # HPRR
        ORR_pars_char = Load_from_Indexes.ORR_pars_OVV(reload=False)  # ORR
        HER_pars = Load_from_Indexes.HER_pars_OVV(reload=False)
OER_pars = Load_from_Indexes.OER_pars_OVV(postOVVout, SampleCodes, reload=False)
HPRR_Porph_SiO2 = HPRR_pars_char.loc[HPRR_pars_char.SampleID.isin(serie)]
ORR_Porph_SiO2 = ORR_pars_char.loc[ORR_pars_char.SampleID.isin(serie)]
        HER_Porph_SiO2 = HER_pars.loc[HER_pars.SampleID.isin(serie)]
        OER_Porph_SiO2 = OER_pars.loc[OER_pars.SampleID.isin(serie)]
return ORR_Porph_SiO2
def test_update_from_index(pars, EC_index):
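        # Compares the overlapping columns of a representative pars row against
        # EC_index; if the pars values look corrupted (contain "error"), those columns
        # are overwritten from the index via a left merge on PAR_file.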
_olap = pars.columns.intersection(EC_index.columns)
_olap_minus = [i for i in _olap if not "PAR_file" == i]
_mtime = [i for i in pars.columns if i.endswith("delta_mtime")]
if _mtime:
_idx = pars[_mtime[0]].idxmin()
else:
_idx = 0
_ECidx = (
EC_index.loc[EC_index.PAR_file == pars.iloc[_idx].PAR_file][_olap]
.iloc[0]
.to_dict()
)
_prsx = pars.iloc[_idx][_olap].to_dict()
_check = {
key: {"pars": val, "EC_index": _ECidx.get(key, "xx")}
for key, val in _prsx.items()
if _ECidx.get(key, "xx") != val
}
_pars_bad = False
if _check:
_pars_bad = any(
"error" in str(i) for i in [i["pars"] for i in _check.values()]
)
if _pars_bad:
_logger.info(f"Overwriting columns in Pars from EC_index")
_new_pars = pd.merge(
pars[[i for i in pars.columns if i not in _olap_minus]],
EC_index[_olap],
on="PAR_file",
how="left",
)
else:
_new_pars = pars
return _new_pars
@staticmethod
def EIS_pars_OVV(
reload=False,
extra_plotting=False,
xls_out=False,
BRUTE_out=False,
use_daily=True,
use_latest=False,
**kwargs,
):
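        # Loads the EIS parameter overview from the daily pickle when possible;
        # otherwise re-reads all per-spectrum pars files, cleans the float columns,
        # merges the lin_Warburg fits, sample codes and index columns, keeps only the
        # final fits and filters to the better-populated equivalent-circuit models.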
# IndexOVV_EISpars_fn_xls = PostDestDir.joinpath('EIS_pars_IndexOVV_v{0}.xlsx'.format(FileOperations.version))
# IndexOVV_EISpars_fn = PostDestDir.joinpath('EIS_pars_IndexOVV_v{0}.pkl.compress'.format(FileOperations.version))
# PostDestDir = Load_from_Indexes.PostDestDir
# FindExpFolder('VERSASTAT').PostDir
eis_daily = get_daily_pickle(exp_type="EIS_pars")
# today = dt.datetime.now().date()
# eis_daily_pickle_path = PostDestDir.joinpath(f'{today.year}-{today.month}-{today.day}_EIS_pars_{system()}.pkl.compress')
# eis_daily_pickle_path_RAW = PostDestDir.joinpath(f'{today.year}-{today.month}-{today.day}_EIS_pars_{system()}_RAW.pkl.compress')
if eis_daily.get("_exists", False) and not reload and use_daily:
EIS_pars = pd.read_pickle(eis_daily.get("daily_path"))
EIS_pars = FileOperations.ChangeRoot_DF(EIS_pars, [], coltype="string")
_logger.info(
f'Loaded EIS_pars OVV from daily {eis_daily["today"]} pickle: {eis_daily.get("daily_path","")}'
)
elif (
eis_daily.get("daily_options", [])
and not reload
and (use_latest or use_daily)
):
EIS_pars = pd.read_pickle(eis_daily.get("daily_options")[-1])
EIS_pars = FileOperations.ChangeRoot_DF(EIS_pars, [], coltype="string")
_logger.info(
f'Loaded EIS_pars OVV from daily {eis_daily.get("daily_options")[-1]} '
)
else:
# @@ Read EIS pars files and extend with columns from Samples
# try other way:: idx_files_EIS = [list(Path(i).rglob('**/EIS/*pars_v20.xlsx')) for i in OnlyRecentMissingOVV.Dest_dir.unique() if list(Path(i).rglob('**/EIS/*pars_v20.xlsx'))]
_logger.info(
f'START reloading EIS_pars OVV from daily {eis_daily["today"]}'
)
# OnlyRecentMissingOVV = ECRunOVV(load=1).index
## ['EXP_dir','Dest_dir','PAR_file','PAR_file_Ring', 'ORR_act_N2_bg','DestFile']
# OnlyRecentMissingOVV = FileOperations.ChangeRoot_DF(OnlyRecentMissingOVV,[])
# OnlyRecentMissingOVV.PAR_file = OnlyRecentMissingOVV.PAR_file.astype(str)
# OnlyRecentMissingOVV['Loading_cm2'] = OnlyRecentMissingOVV['Loading_cm2'].round(3)
# SampleCodes = SampleCodesChar().load
EC_index, SampleCodes = Load_from_Indexes.get_EC_index()
def read_df(_par_fls):
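                # Generator: reads each pars file (xlsx or pkl), normalizes its path
                # columns and yields it with source-file metadata (filename, mtime,
                # age, basename).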
# _ps = Path(d).rglob(f'*_pars_v{FileOperations.version}.xlsx' )
while True:
try:
i = next(_par_fls)
if i.name.endswith("xlsx"):
_pp = pd.read_excel(i, index_col=[0])
elif i.name.endswith("pkl"):
_pp = pd.read_pickle(i)
_pp = FileOperations.ChangeRoot_DF(_pp, [], coltype="string")
_source_mtime = dt.datetime.fromtimestamp(i.stat().st_mtime)
_delta_mtime = dt.datetime.now() - _source_mtime
_pp = _pp.assign(
**{
"sourceFilename": i,
"source_mtime": _source_mtime,
"source_delta_mtime": _delta_mtime,
"sourcebasename": i.stem,
}
)
yield _pp
except StopIteration:
return "all done"
print("gen empty")
# finally:
# yield _pp
# _pf = _pp.PAR_file.unique()[0]
# _pfstem = Path(_pf).stem
# _spectraf = list(Path(Path(i).parent).rglob(f'{_pfstem}_v{FileOperations.version}.xlsx' ))[0]
# _spectradf = pd.read_excel(_spectraf )
# yield _pp
# bn = 'O2_EIS-range_1500rpm_JOS1_285_5mV_1500rpm_pars_v20.xlsx'
EIS_OVV = EC_index.loc[EC_index.PAR_exp == "EIS"]
col_names = ["File_SpecFit", "File_SpecRaw", "PAR_file"]
# +['PAR_file','Segment',EvRHE, 'RPM_DAC']
# [ Path(d).rglob(f'*_pars_v{FileOperations.version}.xlsx' ) for d in EIS_OVV.Dest_dir.unique()]
_par_files = [
list(
Path(d.joinpath("EIS")).rglob(
f"*_pars_v{FileOperations.EIS_version}.xlsx"
)
)
for d in EIS_OVV.Dest_dir.unique()
]
_EIS_WB_files = [
list(Path(d.joinpath("EIS/lin_Warburg")).rglob(f"lin_Warburg*.pkl"))
for d in EIS_OVV.Dest_dir.unique()
]
_EIS_WB_fls = (a for i in _EIS_WB_files for a in i)
_par_fls = (a for i in _par_files for a in i) # if 'EIS' in a.name)
# tt = (i for i in _par_fls if bn in i.name)
# __ttp = list(read_df(tt, col_names))
if eis_daily.get("_raw_exists", False) and use_daily == True:
EIS_pars_all = pd.read_pickle(eis_daily.get("daily_path_RAW"))
elif (
not eis_daily.get("_raw_exists", False)
and use_daily == True
and eis_daily.get("daily_options_RAW")
):
EIS_pars_all = pd.read_pickle(eis_daily.get("daily_options_RAW")[-1])
else:
_pars_lst = list(read_df(_par_fls))
EIS_pars_RAW = pd.concat(_pars_lst, sort=False)
EIS_pars_RAW.sort_values("source_delta_mtime", inplace=True)
EIS_pars_RAW = EIS_pars_RAW.reset_index()
EIS_pars_all = EIS_pars_RAW
float_cols = set(
[
a
for i in EIS_pars_all.lmfit_var_names.unique()
if type(i) == str and not "(" in i
for a in i.split(", ")
]
)
float_cols.update(
set(
[
a
for i in float_cols
for a in EIS_pars_all.columns
if a.startswith(i)
]
)
)
EIS_pars_all[list(float_cols)] = EIS_pars_all[list(float_cols)].fillna(
0
)
# EIS_pars_all[list(float_cols)] = EIS_pars_all[list(float_cols)].astype(float)
obj_flt_cols = [
i
for i in EIS_pars_all.columns
if str(EIS_pars_all[i].dtype) == "object" and i in float_cols
]
EIS_pars_all[obj_flt_cols] = EIS_pars_all[obj_flt_cols].replace("", 0)
EIS_pars_all[list(float_cols)] = EIS_pars_all[list(float_cols)].astype(
float
)
wrong_fls = [
EIS_pars_all.loc[
EIS_pars_all[i].astype(str).str.contains("Parameter")
]
for i in obj_flt_cols
]
if wrong_fls:
wrong_objflt_df = pd.concat(wrong_fls)
fix_dct = {
i: [
float(v.split("value=")[-1].split(",")[0])
for v in wrong_objflt_df[i].values
]
for i in obj_flt_cols
}
fixed_objflt_df = wrong_objflt_df.assign(**fix_dct)
EIS_pars_all = pd.concat(
[
EIS_pars_all.drop(index=wrong_objflt_df.index, axis=0),
fixed_objflt_df,
],
axis=0,
sort=True,
)
def _add_WB_pars(EIS_pars_all):
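                    # Merges the linear-Warburg fit results (cached or re-read) into
                    # the EIS pars frame on their shared columns, coercing mismatched
                    # dtypes where possible.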
_WB_RAW_daily_path = eis_daily.get("daily_path_RAW_WB")
if _WB_RAW_daily_path.exists():
_EIS_WB_pars_all = pd.read_pickle(_WB_RAW_daily_path)
else:
_WB_lst = list(read_df(_EIS_WB_fls))
_EIS_WB_pars_all = pd.concat(
_WB_lst, sort=False, ignore_index=True
)
_EIS_WB_pars_all.to_pickle(_WB_RAW_daily_path)
_diffcols = set(EIS_pars_all.columns).difference(
_EIS_WB_pars_all.columns
)
_mcols = [
i
for i in set(EIS_pars_all.columns).intersection(
_EIS_WB_pars_all.columns
)
if i
not in [
"sourceFilename",
"source_mtime",
"source_delta_mtime",
"sourcebasename",
]
]
_dtype_mismatch = [
(i, EIS_pars_all[i].dtype, _EIS_WB_pars_all[i].dtype)
for i in _mcols
if EIS_pars_all[i].dtype != _EIS_WB_pars_all[i].dtype
]
if _dtype_mismatch:
_excl = []
for i in _dtype_mismatch:
try:
_EIS_WB_pars_all[i[0]] = _EIS_WB_pars_all[i[0]].astype(
i[1]
)
except Exception as e:
_excl.append(i[0])
print(i, "\n", e)
_mcols = [i for i in _mcols if i not in _excl]
# EIS_pars_all[i[0]] = EIS_pars_all[i[0]].astype(i[2])
_merge = pd.merge(
EIS_pars_all,
_EIS_WB_pars_all,
on=_mcols,
how="left",
suffixes=("", "_WB"),
)
if not _merge.empty:
return _merge
else:
print("WB merge was empty")
return EIS_pars_all
EIS_pars_all = _add_WB_pars(EIS_pars_all)
EIS_pars_all = EIS_pars_all.assign(
**{
"EIS_fake": [
"fakeZmean" in Path(i).name
for i in EIS_pars_all.PAR_file.to_numpy()
]
}
)
_not_in_index = EIS_pars_all.loc[
(
~(EIS_pars_all.PAR_file.isin(EC_index.PAR_file.values))
& (~EIS_pars_all.EIS_fake == True)
)
]
CleanUpCrew(
list_of_files=_not_in_index.sourceFilename.unique(), delete=True
)
EIS_pars_all = EIS_pars_all.iloc[
~(EIS_pars_all.index.isin(_not_in_index.index))
]
EIS_pars_all = Load_from_Indexes.test_update_from_index(
EIS_pars_all, EC_index
)
EIS_pars_all.to_pickle(eis_daily.get("daily_path_RAW"))
# EIS_pars_all = pd.read_pickle(eis_daily.get('daily_path_RAW'))
# === TAKING ONLY NEWEST FITTING PARS ===
#
# for n ,gr in EIS_pars_all.groupby(by=col_names):
# n,gr
E_dc_RHE_cols = [
(np.round(i, 3), np.round(i, 3) * 1e3)
for i in EIS_pars_all[EvRHE].values
]
EIS_pars_all = EIS_pars_all.assign(
**{
"E_dc_RHE": [i[0] for i in E_dc_RHE_cols],
"E_dc_RHE_mV": [i[1] for i in E_dc_RHE_cols],
}
)
EIS_pars_recent = EIS_pars_all.loc[
(EIS_pars_all.source_mtime > pd.Timestamp(dt.date(2020, 11, 25)))
& (EIS_pars_all.PAR_file.str.contains("None") == False)
]
EIS_pars_undup = EIS_pars_recent.dropna(subset=col_names).drop_duplicates(
keep="first"
)
# EIS_pars = EIS_pars.loc[EIS_pars.lmfit_var_names.str.contains('/(')]
# set([a for i in EIS_pars_all.lmfit_var_names.unique() if not '(' in i for a in i.split(', ')])
# === POST EDITING OF LOADED PARS ===
EIS_pars_undup = EIS_pars_undup.assign(
**{"Loading_cm2": EIS_pars_undup["Loading_cm2"].round(3)}
)
EIS_pars_undup = post_helper.make_uniform_EvRHE(EIS_pars_undup)
EIS_pars_undup = CollectPostOVV.MatchECconditions(EIS_pars_undup)
# EIS_pars_undup = Load_from_Indexes.add_missing_ECindex_cols(EC_index, EIS_pars_undup)
_oc_OVV = list(EIS_pars_undup.columns.intersection(EIS_OVV.columns))
if not set(EIS_OVV.groupby(_oc_OVV).groups.keys()).intersection(
EIS_pars_undup.groupby(_oc_OVV).groups.keys()
):
_drpcols = [
a
for a in EIS_pars_undup.columns
if (
                        a in [i for i in _oc_OVV if i != "PAR_file"]
                        or "_".join(a.split("_")[0:-1])
                        in [i for i in _oc_OVV if i != "PAR_file"]
)
]
# EIS_pars_undup.drop(columns =_drpcols)
EIS_pars_undup = Load_from_Indexes.add_missing_ECindex_cols(
EC_index, EIS_pars_undup.drop(columns=_drpcols)
)
# EIS_pars_undup = pd.merge(EIS_pars_undup,EIS_OVV,on=_oc_OVV, how='left')
_oc_SC = list(EIS_pars_undup.columns.intersection(SampleCodes.columns))
EIS_pars_undup = pd.merge(
EIS_pars_undup, SampleCodes, how="left", on=_oc_SC
)
EIS_pars_BRUTE = EIS_pars_undup.loc[
(EIS_pars_undup.BRUTE_FIT == 1) | (EIS_pars_undup.FINAL_FIT == 0)
]
if BRUTE_out:
EIS_pars_BRUTE.to_pickle(eis_daily["daily_path_BRUTE"])
EIS_pars = EIS_pars_undup.loc[(EIS_pars_undup.FINAL_FIT == 1)]
EIS_pars = EIS_extra_methods.add_best_model_per_spectrum(EIS_pars)
EIS_pars.to_pickle(eis_daily["daily_path"])
_logger.info(f'EIS_pars OVV to daily pickle: {eis_daily.get("daily_path")}')
_err_type = "lmfit_MSE"
_filter = "(EIS_pars.lmfit_MSE < 65E4) & (EIS_pars.Rct < 2E3) & (EIS_pars.Rct > 2E-2) \
& (EIS_pars.Rs > 0.01) & (EIS_pars.Rs < 200) & (EIS_pars.Cdlp < 0.075)\
& (EIS_pars.lmfit_redchi < 1E3) & (EIS_pars.Aw < 10E3) & (EIS_pars.Aw > 10E-2)\
& (EIS_pars.Qad < 1) & (EIS_pars.tau < 1E3)"
_filter += '& (EIS_pars.SampleID.str.contains("JOS1|JOS2|JOS3|JOS4|JOS5"))'
_filter += "& (EIS_pars.EIS_fake == False)"
_grps = ["Model_EEC", "Gas", "lmfit_var_names"][0:2]
best_models = (
EIS_pars.loc[eval(_filter)]
.dropna(axis=0, subset=[_err_type])
.groupby(_grps)[_err_type]
.agg(["count", "mean", "std"])
.sort_values("mean", ascending=True)
)
print(best_models)
keep_models = (
best_models.loc[(best_models["count"] > 5) & (best_models["std"] > 0)]
.index.get_level_values(0)
.unique()
)
EIS_pars = EIS_pars.loc[EIS_pars.Model_EEC.isin(keep_models)]
best_models = (
EIS_pars.loc[eval(_filter)]
.dropna(axis=0, subset=[_err_type])
.groupby(_grps)[_err_type]
.agg(["count", "mean", "std"])
.sort_values(["Gas", "mean"], ascending=True)
)
print(best_models)
if hasattr(EIS_pars, "best_mod_name"):
# EIS_best_mods = EIS_pars.loc[EIS_pars.Model_EEC_name.isin([i for i in EIS_pars.best_mod_name.unique() if not pd.isna(i)])]
EIS_best_mods = EIS_pars.loc[
EIS_pars.index.isin(
[i for i in EIS_pars.best_mod_n.unique() if not pd.isna(i)]
)
]
_agg = (
EIS_best_mods.dropna(subset=[_err_type])
.groupby(_grps + ["E_RHE"])[_err_type]
.agg(["count", "mean", "std"])
)
_agg_best = _agg.loc[_agg["count"] > 3].sort_values(
["Gas", "E_RHE", "mean"], ascending=True
)
# fast_checking_EEC_models =[(2, 'EEC_2CPEpRW',50),
# (3, 'EEC_2CPEpW',120),(4,'EEC_2CPE_W',100),
# (5, 'EEC_2CPE',100), (6,'EEC_Randles_RWpCPE_CPE',60)]
# # ['Model(Singh2015_RQRQR)', 'Model(Singh2015_RQRWR)', 'Model(Singh2015_R3RQ)', 'Model(Bandarenka_2011_RQRQR)' ]
if extra_plotting == "blocked":
for n, r in best_models.head(1).iterrows():
modname = r.name[0]
varnames = [
a
for i in EIS_pars.loc[
EIS_pars["Model_EEC"] == modname
].lmfit_var_names.unique()
for a in i.split(", ")
]
# [1]]+[fast_checking_EEC_models[4]]:
# modname = f'Model({_modname})'
EIS_pars_fltr = EIS_pars.loc[
(EIS_pars["Model_EEC"] == modname) & eval(_filter)
]
for var in varnames:
EIS_pars_fltr.query("pH < 7 & Rct < 2E3").plot(
y=var,
x="E_RHE",
c="BET_cat_agg",
colormap="rainbow_r",
kind="scatter",
title=modname,
logy=0,
)
# .query('pH < 15').plot(y='Rs',x='E_RHE',c='pH',colormap='rainbow_r',kind='scatter',ylim=(0,100),title=modname)
EIS_pars.loc[EIS_pars["Model_EEC"] == modname].query("pH < 15").plot(
y="Qad",
x="E_RHE",
c="pH",
colormap="rainbow_r",
kind="scatter",
ylim=(0, 0.05),
title=modname,
)
EIS_pars.loc[EIS_pars["Model_EEC"] == modname].query("pH < 7").plot(
y="R_ion",
x="E_RHE",
c="BET_cat_agg",
colormap="rainbow_r",
kind="scatter",
title=modname,
)
EIS_pars.loc[EIS_pars["Model_EEC"] == modname].query("pH < 7").plot(
y="tau",
x="E_RHE",
c="BET_cat_agg",
colormap="rainbow_r",
kind="scatter",
ylim=(0, 100),
title=modname,
)
EIS_pars.loc[EIS_pars["Model_EEC"] == modname].query("pH < 7").plot(
y="Rct",
x="E_RHE",
c="BET_cat_agg",
colormap="rainbow_r",
kind="scatter",
ylim=(0.1, 1e4),
logy=True,
title=modname,
)
if (
not EIS_pars.loc[EIS_pars["Model_EEC"] == modname]
.query("pH > 7")
.empty
):
EIS_pars.loc[EIS_pars["Model_EEC"] == modname].query("pH > 7").plot(
y="Qad+Cdlp",
x="E_RHE",
c="BET_cat_agg",
colormap="rainbow_r",
kind="scatter",
ylim=(0.1, 1e-4),
logy=True,
title=modname,
)
plt.close()
# EIS_pars.query('pH < 17').groupby('Model_EEC').plot(y='RedChisqr',x='E_RHE',colormap='viridis',kind='scatter',ax=ax)
_porph = EIS_pars.loc[EIS_pars.PAR_file.str.contains("06.05")]
fig, ax = plt.subplots()
for n, Hgr in _porph.query("pH < 7").groupby("postAST"):
c_set = "g" if n == "no" else "r"
Hgr.plot(
x="E_RHE",
y="Rct_kin",
s=50,
c=c_set,
kind="scatter",
label=n,
title="EIS, E vs Qad at",
ax=ax,
ylim=(1e-6, 1),
logy=True,
)
plt.show()
plt.close()
if "update_index" in kwargs.keys():
pass
return EIS_pars
# dest_files.append({'index' : n, 'PAR_file' : str(r.PAR_file),'EIS_dest_dir' : EIS_dest_dir,
# 'EIS_dest_Pars' : EIS_dest_dir.joinpath( Path(r.PAR_file).stem + '_pars.xlsx'),
# 'EIS_dest_spectra' :EIS_dest_dir.joinpath( Path(r.PAR_file).stem + '_Combined.xlsx')
# })
# EIS_pars_index_p1 = postOVVout.query('Type_output == "EIS_Pars1"')
## EIS_pars_index_p2 = postOVVout.query('Type_output == "EIS_Pars2"')
# EIS_pars_indexes = postOVVout.query('Type_output == "EIS_Pars"')
# if 'source' in kwargs.keys():
# EIS_pars_indexes = EIS_pars_indexes.loc[EIS_pars_indexes.Source == kwargs.get('source','ExpDirs')]
## pars_index_from_read = EIS_get_index_column_names()
## EIS_pars_index = pd.concat([EIS_pars_index_p1,EIS_pars_index_p2])
## EIS_pars_index = postOVVout.groupby('Type_output').get_group('EIS_Pars1')
# EIS_pars_spectra = postOVVout.groupby('Type_output').get_group('EIS_AllData_combined').drop_duplicates(subset=['PAR_file','DestFile','Time_since_run'])
## EPtest = EIS_pars_indexes.loc[no_match] # a slice for testing purpose
## test_load_nm = no_matches.loc[no_matches[2].str.contains('Columns not matching! "Loading_cm2" values:'),0].values
## EPtest = EIS_pars_indexes.loc[EIS_pars_indexes.index.isin(test_load_nm)]
# EISlst,no_match,faillst = [],[],[]
@staticmethod
def HPRR_pars_OVV(
postOVVout, SampleCodes, reload=False, extra_plotting=False, xls_out=False
):
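        # Loads the HPRR parameter overview from the cached xlsx when available;
        # otherwise rebuilds it from the per-file pars, joins the index and sample
        # codes, and re-exports it.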
# exp_type = 'H
IndexOVV_HPRRpars_fn = FindExpFolder("VERSASTAT").PostDir.joinpath(
"Pars_IndexOVV_HPRR_v{0}.xlsx".format(FileOperations.version)
)
if IndexOVV_HPRRpars_fn.exists() and reload != True:
HPRR_pars_char = pd.read_excel(IndexOVV_HPRRpars_fn, index_col=[0])
HPRR_pars_char = FileOperations.ChangeRoot_DF(
HPRR_pars_char, [], coltype="string"
)
else:
# === Making destination directories === #
PostDestDir = FindExpFolder("VERSASTAT").DestDir.joinpath("PostEC")
PPD_HPRR = PostDestDir.joinpath("HPRR")
PPD_HPRR.mkdir(parents=True, exist_ok=True)
PPD_HPRR_data = PPD_HPRR.joinpath("DataFiles")
PPD_HPRR_data.mkdir(parents=True, exist_ok=True)
# # === Loading Index files for HPRR and reading the Parameters files into one DataFrame === #
HPRR_pars_index = postOVVout.groupby("Type_output").get_group("HPRR")
HP_Pars_files = [
Path(i)
for i in HPRR_pars_index["SourceFilename"].unique()
if "_Pars" in Path(i).stem
]
HPRR_pars_raw = pd.concat(
[pd.read_excel(i, index_col=[0]) for i in HP_Pars_files], sort=False
)
HPRR_pars_raw = FileOperations.ChangeRoot_DF(
HPRR_pars_raw, [], coltype="string"
)
HPRR_merge_cols = [
i
for i in HPRR_pars_raw.columns
if i in HPRR_pars_index.columns and not "Segment" in i
]
HPRR_p2, HPRR_ovv2 = HPRR_pars_raw.set_index(
HPRR_merge_cols
), HPRR_pars_index.set_index(HPRR_merge_cols)
HPRR_pars_ovv = HPRR_p2.join(HPRR_ovv2, rsuffix="_ovv").reset_index()
HPRR_pars_merge_cols = [
i
for i in HPRR_pars_ovv.columns
if i in postOVVout.columns and not "Segment" in i and not "Unnamed" in i
]
HPRR_pars = pd.merge(
HPRR_pars_ovv, postOVVout, on=HPRR_pars_merge_cols, how="left"
)
# HPRR_pars = pd.merge(HPRR_pars_ovv,postOVVout,on='PAR_file',how='left')
print(
"Leftover SampleIDs: {0}".format(
set(HPRR_pars.SampleID.unique())
- set(SampleCodes.SampleID.unique())
)
)
HPRR_char_merge_cols = [
i
for i in HPRR_pars_ovv.columns
if i in SampleCodes.columns
if not "Unnamed" in i
]
HPRR_pars_char = pd.merge(
HPRR_pars_ovv, SampleCodes, on=HPRR_char_merge_cols, how="left"
)
HPRR_pars_char = HPRR_pars_char.drop(
columns=[i for i in HPRR_pars_char.columns if "Unnamed" in i]
)
new_IndexOVV_HPRRpars_target = FileOperations.CompareHashDFexport(
HPRR_pars_char, IndexOVV_HPRRpars_fn
)
_logger.info(
"PostEC HPRR re-indexed and saved: {0}".format(
new_IndexOVV_HPRRpars_target
)
)
if extra_plotting:
try:
HPRR_pars_char.query(
'(RPM_HPRR > 700) & (Loading_cm2 > 0.1) & (E_name == "E_j0")'
).plot(x="AD/AG", y="fit_slope_HPRR", kind="scatter")
except Exception as e:
print("HPRR plot fail:", e)
try:
HPRR_pars_char.query(
'(RPM_HPRR > 700) & (Loading_cm2 > 0.1) & (E_name == "E_j0")'
).plot(x="N_content", y="fit_slope_HPRR", kind="scatter")
except Exception as e:
print("HPRR plot fail:", e)
return HPRR_pars_char
@staticmethod
def HER_pars_OVV(reload=False, use_daily=True, extra_plotting=False, xls_out=False):
# exp_type = 'H
# PostDestDir = Load_from_Indexes.PostDestDir
her_daily = get_daily_pickle(exp_type="HER_pars")
# IndexOVV_HER_pars_fn = FindExpFolder('VERSASTAT').PostDir.joinpath('Pars_IndexOVV_HER_v{0}.pkl.compress'.format(FileOperations.version))
if her_daily.get("_exists", False) and reload != True:
# Cdl_pars_char = pd.read_excel(IndexOVV_N2_pars_fn,index_col=[0])
HER_pars_char = pd.read_pickle(her_daily.get("daily_path"))
HER_pars_char = FileOperations.ChangeRoot_DF(
HER_pars_char, [], coltype="string"
)
else:
# @@ Check POST_AST status from OVV and PRM
EC_index, SampleCodes = Load_from_Indexes.get_EC_index()
def read_df(_par_fls, read_types=["HER_pars"]):
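                # Generator: yields one metadata dict per file (path, mtimes, inferred
                # type); for recognized HER pars files the parsed DataFrame is included
                # under "DF".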
# _ps = Path(d).rglob(f'*_pars_v{FileOperations.version}.xlsx' )
while True:
try:
i = next(_par_fls)
_source_mtime = dt.datetime.fromtimestamp(i.stat().st_mtime)
_delta_mtime = dt.datetime.now() - _source_mtime
_i_stem = i.stem
_pparts = i.parent.parts
if f"HER_v{FileOperations.version}" in _pparts[-2]:
if _i_stem.startswith("HER") or "HER" in _i_stem.split("_"):
# any([_i_stem.startswith(_p) for _p in ['N2_HER|N2_EIS']]):
_type = "HER_pars"
else:
_type = "HER_unknown"
else:
_type = "_unknown"
_meta = {
"sourceFilename": i,
"source_mtime": _source_mtime,
"source_delta_mtime": _delta_mtime,
"source_basename": _i_stem,
"source_type": _type,
}
if _type in read_types:
_pp = pd.read_excel(i, index_col=[0])
_pp = FileOperations.ChangeRoot_DF(
_pp, [], coltype="string"
)
_pp = _pp.assign(**_meta)
else:
_pp = pd.DataFrame(_meta, index=[0])
if not "Analysis_date" in _pp.columns:
_pp = _pp.assign(
**{
"Analysis_date": dt.datetime.fromtimestamp(
i.stat().st_ctime
)
}
)
_meta.update({"DF": _pp})
yield _meta
except StopIteration:
return "all done"
print("gen empty")
if her_daily.get("_raw_exists", False) and use_daily:
HER_pars_all = pd.read_pickle(her_daily.get("daily_path_RAW"))
elif her_daily.get("daily_options_RAW", False) and use_daily:
HER_pars_all = pd.read_pickle(her_daily.get("daily_options_RAW")[-1])
else: # Construct new N2 pars ovv from reading in files
HER_OVV = EC_index.loc[EC_index.PAR_exp.str.contains("HER")]
_par_files = [
list(
Path(d.joinpath(f"HER_v{FileOperations.version}")).rglob(
"*xlsx"
)
)
for d in HER_OVV.Dest_dir.unique()
]
_par_fls = (a for i in _par_files for a in i) # if 'EIS' in a.name)
_par_reads = read_df(_par_fls, read_types=["HER_pars"])
_reads_out = [i for i in _par_reads]
HER_pars_all = pd.concat(
[i["DF"] for i in _reads_out], sort=False, ignore_index=True
)
not_in_index = HER_pars_all.loc[
~HER_pars_all.PAR_file.isin(EC_index.PAR_file.values)
]
if not_in_index.empty:
print("HER pars, not-in-index is empty... success!")
else:
print("HER pars, not-in-index is NOT empty... delete wrong pars??")
# CleanUpCrew(list_of_files = not_in_index.SourceFilename.unique(), delete = True)
HER_pars_all = HER_pars_all.loc[
HER_pars_all.PAR_file.isin(EC_index.PAR_file.values)
]
HER_pars_recent = HER_pars_all.loc[
HER_pars_all.Analysis_date > dt.datetime.fromisoformat("2020-07-15")
]
                for n, gr in HER_pars_recent.groupby("source_type"):
print(
n,
f" len {len(gr)}",
f'\nSamples: {", ".join([str(i) for i in gr.SampleID.unique()])}',
)
HER_pars_recent.to_pickle(her_daily["daily_path_RAW"])
# ORR_merge_cols = [i for i in ORR_pars.columns if i in ORR_pars_index.columns and not 'Segment' in i]
# p2,ovv2 = ORR_pars.dropna(subset=ORR_merge_cols).set_index(ORR_merge_cols), ORR_pars_index.dropna(subset=ORR_merge_cols).set_index(ORR_merge_cols)
# ORR_pars_ovv = p2.join(ovv2,rsuffix='_ovv').reset_index()
# ORR_pars_ovv.query('(pH < 7)').plot(y='E_onset',x='Loading_cm2',kind='scatter',logy=False)
# ORR_pars_ovv = pd.merge(ORR_pars,ORR_pars_index,on=ORR_merge_cols,suffixes=('','_ovv'),how='left')
# ORR_pars = pd.merge(ORR_pars,postOVVout,on=['PAR_file','SampleID','Electrolyte','pH','postAST'],how='left',suffixes=('','_ovv'))
# print('Leftover SampleIDs: {0}'.format(set(ORR_pars.SampleID.unique()) - set(SampleCodes.SampleID.unique())))
HER_pars_char = pd.merge(
HER_pars_recent, SampleCodes, on="SampleID", how="left"
)
HER_pars_char = pd.merge(
HER_pars_char, EC_index, on="PAR_file", suffixes=("", "_index")
)
### Fixing the pars after loading...
# TODO : taking out duplicates based on time_since_run....
Load_na = HER_pars_char.loc[HER_pars_char.Loading_cm2.isna()]
if not Load_na.empty:
Load_na_missingvalues = [
(n, *GetSampleID.ink_loading_from_filename(i.PAR_file))
for n, i in Load_na.iterrows()
]
Load_na_vals = (
pd.DataFrame(Load_na_missingvalues)
.rename(columns={1: "Loading_name", 2: "Loading_cm2"})
.set_index([0])
)
HER_pars_char.Loading_cm2.fillna(
value=Load_na_vals.Loading_cm2, inplace=True
)
# ORR_char_merge_cols = [i for i in ORR_pars.columns if i in SampleCodes.columns]
# ORR_pars_char = pd.merge(ORR_pars,SampleCodes,on=ORR_char_merge_cols,how='left')
HER_pars_char = HER_pars_char.drop(
columns=[i for i in HER_pars_char.columns if "Unnamed" in i]
)
if HER_pars_char.loc[HER_pars_char.Loading_cm2.isna()].empty == False:
HER_pars_char.Loading_cm2 = HER_pars_char.Loading_cm2.fillna(
value=0.379
) # fillna for Loading_cm2
HER_pars_char.Loading_cm2 = HER_pars_char.Loading_cm2.round(3)
HER_pars_char.HER_at_E_slice = HER_pars_char.HER_at_E_slice.round(3)
if HER_pars_char.postAST.dropna().empty:
HER_pars_char = HER_pars_char.drop(columns="postAST")
# _int = list(set(ORR_pars_char.columns).intersection(set(EC_index.columns)))
HER_pars_char = pd.merge(
HER_pars_char, EC_index, on="PAR_file", suffixes=("", "_index")
)
HER_pars_char = make_uniform_RPM_DAC(HER_pars_char)
# ORR_pars_char = pd.merge(ORR_pars_char, EC_index[['PAR_file', 'postAST']], on = 'PAR_file')
_sgdct = []
for pf, pfgrp in HER_pars_char.groupby("PAR_file"):
_segs = pfgrp["Segment #"].unique()
for _n, _seg in enumerate(_segs):
_sgdct.append({"PAR_file": pf, "Segment #": _seg, "HER_Segnum": _n})
_HER_segnums = pd.DataFrame(_sgdct)
HER_pars_char = pd.merge(
HER_pars_char, _HER_segnums, on=["PAR_file", "Segment #"]
)
# ORR_pars_char.loc[ORR_pars_char.Loading_cm2.isna() == True]
# if xls_out:
# IndexOVV_HER_pars_fn = FileOperations.CompareHashDFexport(HER_pars_char,IndexOVV_HER_pars_fn)
HER_pars_char.to_pickle(her_daily["daily_path"])
if extra_plotting:
jmA2_slice = HER_pars_char.loc[(HER_pars_char["Segment #"] > 1)].query(
'(HER_type == "j_slice_onset") & (HER_at_J_slice == -2)'
)
jmA2_slice.plot(
x="Metal_wt", y="HER_Tafel_slope", kind="scatter", ylim=(0, 1e3)
)
jmA2_slice.plot(
x="N_content",
y="HER_Tafel_slope",
s=50,
c="g",
kind="scatter",
ylim=(0, 1e3),
)
# HER_atE = HER_pars_char.loc[(HER_pars_char['Segment #'] > 1) & np.isclose(HER_pars_char[EvRHE+'_upper'],-0.3,atol=0.02)].query('(E_type == "E_slice")')
if extra_plotting:
E_350mV_slice = HER_pars_char.loc[
(HER_pars_char["Segment #"] > 1)
].query(
'(HER_type == "E_slice") & (HER_at_E_slice < -0.29) & (HER_at_E_slice > -0.33)'
)
fig, ax = plt.subplots()
for n, Hgr in E_350mV_slice.groupby(["postAST", "RPM"]):
c_set = "g" if "no" in n else "r"
_ms_set = "o" if n[-1] < 100 else "*"
Hgr.plot(
x="N_content",
y="HER_J_upper",
s=50,
c=c_set,
kind="scatter",
label=n,
title="HER at -0.3 Vrhe, j vs N_content",
ax=ax,
**{"marker": _ms_set},
)
E_350mV_slice.plot(
x="N_content",
y="HER_J_upper",
kind="bar",
title="HER, j vs N_content at",
)
E_350mV_slice.plot(
x="BET_cat_agg",
y="HER_J_upper",
s=50,
c="g",
kind="scatter",
title="HER, j vs N_content at",
)
return HER_pars_char
def old_HER():
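        # Legacy HER loader kept for reference: reads the HER_Jkin_Tafel pars via the
        # old postOVVout index and xlsx cache instead of the daily pickles.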
if IndexOVV_HER_pars_fn.exists() and reload is not True:
HER_pars_char = pd.read_pickle(IndexOVV_HER_pars_fn)
if HER_pars_char.SourceFilename.iloc[0].exists() == False:
HER_pars_char = FileOperations.ChangeRoot_DF(
HER_pars_char, [], coltype="string"
)
# ORR_pars_char = ORR_pars_char.drop_duplicates(subset=ORR_pars_char.columns[0:19])
elif reload == "pickle":
IndexOVV_HER_pars_fn_pkl = list(
PostDestDir.rglob(
f"{today.year}-{today.month}-*_HER_pars_{system()}.pkl.compress"
)
)[-1]
HER_pars_char = pd.read_pickle(IndexOVV_HER_pars_fn_pkl)
if postOVVout.empty or SampleCodes.empty:
reload = False
if IndexOVV_HER_pars_fn.exists() and reload != True:
HER_pars_char = pd.read_excel(IndexOVV_HER_pars_fn, index_col=[0])
HER_pars_char = FileOperations.ChangeRoot_DF(
HER_pars_char, [], coltype="string"
)
else:
# === Making destination directories === #
PostDestDir = FindExpFolder("VERSASTAT").DestDir.joinpath("PostEC")
PPD_HER_OER = PostDestDir.joinpath("HER_OER")
PPD_HER_OER.mkdir(parents=True, exist_ok=True)
PPD_HER_OER_data = PPD_HER_OER.joinpath("DataFiles")
PPD_HER_OER_data.mkdir(parents=True, exist_ok=True)
# # === Loading Index files for HPRR and reading the Parameters files into one DataFrame === #
HER_pars_index = postOVVout.groupby("Type_output").get_group(
"HER_Jkin_Tafel"
)
# HP_Pars_files = [i for i in HER_pars_index['SourceFilename'].unique() if '_pars' in i.stem]
HER_pars_raw = pd.concat(
[
pd.read_excel(i, index_col=[0])
for i in HER_pars_index["SourceFilename"].unique()
]
)
HER_pars_raw = FileOperations.ChangeRoot_DF(
HER_pars_raw,
[i for i in HER_pars_raw.columns if re.search("([F-f]ile)", i)],
coltype="string",
)
HER_merge_cols = [
i
for i in HER_pars_raw.columns
if i in HER_pars_index.columns
and not "Segment" in i
and not "Sweep_Type" in i
]
HER_p2, HER_ovv2 = HER_pars_raw.set_index(
HER_merge_cols
), HER_pars_index.set_index(HER_merge_cols)
HER_pars_ovv = HER_p2.join(HER_ovv2, rsuffix="_ovv").reset_index()
# HER_pars = pd.merge(HER_pars_ovv,postOVVout,on=HEpars_merge_cols,how='left')
# OER_pars = pd.merge(HPRR_pars_ovv,postOVVout,on=HPRR_pars_merge_cols,how='left')
# # HPRR_pars = pd.merge(HPRR_pars_ovv,postOVVout,on='PAR_file',how='left')
# print('Leftover SampleIDs: {0}'.format(set(HER_.SampleID.unique()) - set(SampleCodes.SampleID.unique())))
HER_char_merge_cols = [
i for i in HER_pars_ovv.columns if i in SampleCodes.columns
]
HER_pars_char = pd.merge(
HER_pars_ovv, SampleCodes, on=HER_char_merge_cols, how="left"
)
new_IndexOVV_HERpars_target = FileOperations.CompareHashDFexport(
HER_pars_char, IndexOVV_HER_pars_fn
)
_logger.info(
"PostEC HPRR re-indexed and saved: {0}".format(
new_IndexOVV_HERpars_target
)
)
@staticmethod
def OER_pars_OVV(
postOVVout, SampleCodes, reload=False, extra_plotting=False, xls_out=False
):
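        # Loads the OER parameter overview from the cached xlsx when available;
        # otherwise rebuilds it from the OER_Jkin_Tafel pars files and merges the
        # sample codes.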
# exp_type = 'H
IndexOVV_OERpars_fn = FindExpFolder("VERSASTAT").PostDir.joinpath(
"Pars_IndexOVV_OER_v{0}.xlsx".format(FileOperations.version)
)
if IndexOVV_OERpars_fn.exists() and reload != True:
OER_pars_char = pd.read_excel(IndexOVV_OERpars_fn, index_col=[0])
OER_pars_char = FileOperations.ChangeRoot_DF(
OER_pars_char, [], coltype="string"
)
else:
# === Making destination directories === #
PostDestDir = FindExpFolder("VERSASTAT").DestDir.joinpath("PostEC")
PPD_HER_OER = PostDestDir.joinpath("HER_OER")
PPD_HER_OER.mkdir(parents=True, exist_ok=True)
PPD_HER_OER_data = PPD_HER_OER.joinpath("DataFiles")
PPD_HER_OER_data.mkdir(parents=True, exist_ok=True)
# # === Loading Index files for HPRR and reading the Parameters files into one DataFrame === #
OER_pars_index = postOVVout.groupby("Type_output").get_group(
"OER_Jkin_Tafel"
)
OER_pars_raw = pd.concat(
[
pd.read_excel(i, index_col=[0])
for i in OER_pars_index["SourceFilename"].unique()
]
)
OER_pars_raw = FileOperations.ChangeRoot_DF(
OER_pars_raw,
[i for i in OER_pars_raw.columns if re.search("([F-f]ile)", i)],
coltype="string",
)
OER_merge_cols = [
i
for i in OER_pars_raw.columns
if i in OER_pars_index.columns
and not "Segment" in i
and not "Sweep_Type" in i
]
OER_p2, OER_ovv2 = OER_pars_raw.set_index(
OER_merge_cols
), OER_pars_index.set_index(OER_merge_cols)
OER_pars_ovv = OER_p2.join(OER_ovv2, rsuffix="_ovv").reset_index()
# HER_pars = pd.merge(HER_pars_ovv,postOVVout,on=HEpars_merge_cols,how='left')
# OER_pars = pd.merge(HPRR_pars_ovv,postOVVout,on=HPRR_pars_merge_cols,how='left')
# # HPRR_pars = pd.merge(HPRR_pars_ovv,postOVVout,on='PAR_file',how='left')
# print('Leftover SampleIDs: {0}'.format(set(HER_.SampleID.unique()) - set(SampleCodes.SampleID.unique())))
OER_char_merge_cols = [
i
for i in OER_pars_ovv.columns
if i in SampleCodes.columns and not "Unnamed" in i
]
OER_pars_char = pd.merge(
OER_pars_ovv, SampleCodes, on=OER_char_merge_cols, how="left"
)
new_IndexOVV_OERpars_target = FileOperations.CompareHashDFexport(
OER_pars_char, IndexOVV_OERpars_fn
)
_logger.info(
"PostEC OER re-indexed and saved: {0}".format(
new_IndexOVV_OERpars_target
)
)
OER_pars_char.loc[(OER_pars_char["Segment #"] > 1)].query(
'(E_type == "E_onset")'
).plot(x="AD/AG", y="TafelSlope", kind="scatter")
OER_pars_char.loc[(OER_pars_char["Segment #"] > 1)].query(
'(E_type == "E_onset")'
).plot(x="N_content", y="TafelSlope", s=50, c="g", kind="scatter")
if extra_plotting:
OER_atE = OER_pars_char.loc[
(OER_pars_char["Segment #"] > 1)
& np.isclose(OER_pars_char[EvRHE + "_upper"], 1.7, atol=0.02)
].query('(E_type == "E_slice")')
fig, ax = plt.subplots()
for n, Ogr in OER_atE.groupby("postAST"):
c_set = "g" if n == "no" else "r"
Ogr.plot(
x="N_content",
y="j_upper",
s=50,
c=c_set,
kind="scatter",
label=n,
title="OER, j vs N_content at",
ax=ax,
)
return OER_pars_char
@staticmethod
def ORR_pars_OVV(reload=False, extra_plotting=False, xls_out=False, use_daily=True):
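        # Loads the ORR parameter overview; when the cached compressed pickle exists
        # and no reload is requested, it is read back directly.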
# exp_type = 'H
PostDestDir = Load_from_Indexes.PostDestDir
orr_daily = get_daily_pickle(exp_type="ORR_pars")
IndexOVV_ORRpars_fn = FindExpFolder("VERSASTAT").PostDir.joinpath(
"Pars_IndexOVV_ORR_v{0}.pkl.compress".format(FileOperations.version)
)
if IndexOVV_ORRpars_fn.exists() and reload is not True:
# ORR_pars_char = pd.read_excel(IndexOVV_ORRpars_fn,index_col=[0])
ORR_pars_char = | pd.read_pickle(IndexOVV_ORRpars_fn) | pandas.read_pickle |
import numpy as np
import pandas as pd
from copy import copy, deepcopy
from matplotlib import pyplot as plt
from datetime import datetime, timedelta
from matplotlib.backends.backend_pdf import PdfPages
dfheight= | pd.read_csv('../data/raw/Results from Val_Roseg_Timelapse in µm per sec.csv') | pandas.read_csv |