| prompt (string, lengths 19–1.03M) | completion (string, lengths 4–2.12k) | api (string, lengths 8–90) |
|---|---|---|
# -*- coding: utf-8 -*-
"""Data_Analysis.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/106zvCx_5_p0TlKI3zkCcEb0VbnWwdahx
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import statsmodels.api as sm
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import SGDRegressor
import xgboost as xgb
from sklearn.metrics import mean_squared_error, r2_score
import re
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
"""
1. Preprocessing Functions:
"""
def calc_change_sentiment(data):
change_in_sent = []
change_in_sent.append(data['compound'][0])
for i in range(1,len(data['compound'])):
if data['compound'][i] == 0:
change_in_sent.append(0)
elif data['compound'][i] < 0 or data['compound'][i] > 0:
dif = data['compound'][i] - data['compound'][(i-1)]
change_in_sent.append(dif)
return change_in_sent
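# Strip every substring of input_txt matching the given regex pattern (used below for retweet markers, @mentions and URLs).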
def remove_pattern(input_txt, pattern):
r = re.findall(pattern, input_txt)
for i in r:
input_txt = re.sub(i, '', input_txt)
return input_txt
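# Vectorised tweet cleaning: remove "RT @user:" prefixes, @mentions and URLs, then keep letters only.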
def clean_tweets(tweets):
    tweets = np.vectorize(remove_pattern)(tweets, r"RT @[\w]*:")
    tweets = np.vectorize(remove_pattern)(tweets, r"@[\w]*")
    tweets = np.vectorize(remove_pattern)(tweets, r"https?://[A-Za-z0-9./]*")
    # np.core.defchararray.replace only substitutes literal substrings, so use a regex to keep letters only
    tweets = np.vectorize(lambda s: re.sub(r"[^a-zA-Z]", " ", s))(tweets)
    return tweets
def classify_news(dataframe):
    # Bucket row indices by calendar day (Sep 23-30, then Oct 1-8), keeping only rows
    # whose timestamp falls within 09:00-15:59 (regular trading hours).
    day_order = [23, 24, 25, 26, 27, 28, 29, 30, 1, 2, 3, 4, 5, 6, 7, 8]
    buckets = {day: [] for day in day_order}
    for i in range(len(dataframe['timestamp'])):
        ts = dataframe['timestamp'][i]
        if ts.day in buckets and 9 <= ts.hour <= 15:
            buckets[ts.day].append(i)
    # One sub-DataFrame per day, in the same order as the original day23..day38 variables.
    return tuple(dataframe.iloc[buckets[day]] for day in day_order)
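# Headline pipeline: de-duplicate, resample to 30-minute medians, add a 3-period SMA of the compound score and its change features, then keep only the trading-day slices used downstream.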
def preprocess_headlines(data):
data.drop_duplicates(subset='headline',keep=False, inplace=True)
data.drop(['ticker','neg','neu','pos'], axis=1, inplace=True)
data.rename(columns={'date_time':'timestamp'},inplace=True)
data.set_index('timestamp', inplace=True)
data_30m = data.resample('30min').median().ffill().reset_index()
headline_sma = data_30m['compound'].rolling(3).mean()
data_30m['Compound SMA(3) Headlines'] = headline_sma
change_in_sent=calc_change_sentiment(data_30m)
data_30m['change in sentiment headlines'] = change_in_sent
data_30m['change in sentiment headlines (t-1)'] = data_30m['change in sentiment headlines'].shift(1)
news_d23,news_d24,news_d25,news_d26,news_d27,news_d28,news_d29,news_d30,news_d31,news_d32,news_d33,news_d34,news_d35,news_d36,news_d37,news_d38 = classify_news(data_30m)
news_d23_red,news_d24_red, news_d25_red, news_d28_red,news_d29_red,news_d30_red,news_d31_red,news_d32_red,news_d35_red,news_d36_red,news_d37_red,news_d38_red = news_d23.iloc[4:],news_d24.iloc[1:],news_d25.iloc[1:],news_d28.iloc[1:],news_d29.iloc[1:],news_d30.iloc[1:],news_d31.iloc[1:],news_d32.iloc[1:],news_d35.iloc[1:],news_d36.iloc[1:],news_d37.iloc[1:],news_d38.iloc[1:]
frames_news = [news_d23_red,news_d24_red, news_d25_red, news_d28_red,news_d29_red,news_d30_red,news_d31_red,news_d32_red,news_d35_red,news_d36_red,news_d37_red,news_d38_red]
processed_headlines = pd.concat(frames_news)
return processed_headlines
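# Twitter pipeline: convert timestamps from UTC to America/Montreal, resample to 30-minute medians, add the 3-period SMA and sentiment-change features, and keep the same trading-day slices as the headline data.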
def preprocess_posts(dataframe):
dataframe.drop(['neg','neu','pos','followers_count'],axis=1,inplace=True)
dataframe['timestamp'] = dataframe['timestamp'].dt.tz_localize('UTC').dt.tz_convert('America/Montreal').dt.tz_localize(None)
dataframe.set_index('timestamp', inplace=True)
twitter_df_30m = dataframe.resample('30min').median().ffill().reset_index()
change_in_sent = calc_change_sentiment(twitter_df_30m)
twitter_sma = twitter_df_30m['compound'].rolling(3).mean()
twitter_df_30m['Compound SMA(3) Twitter'] = twitter_sma
twitter_df_30m['change in sentiment twitter'] = change_in_sent
twitter_df_30m['change in sentiment twitter (t-1)'] = twitter_df_30m['change in sentiment twitter'].shift(1)
tw_news_d23,tw_news_d24,tw_news_d25,tw_news_d26,tw_news_d27,tw_news_d28,tw_news_d29,tw_news_d30,tw_news_d31,tw_news_d32,tw_news_d33,tw_news_d34,tw_news_d35,tw_news_d36,tw_news_d37,tw_news_d38 = classify_news(twitter_df_30m)
tw_news_d23_30m,tw_news_d24_30m,tw_news_d25_30m, tw_news_d28_30m,tw_news_d29_30m,tw_news_d30_30m,tw_news_d31_30m,tw_news_d32_30m,tw_news_d35_30m,tw_news_d36_30m,tw_news_d37_30m,tw_news_d38_30m = tw_news_d23.iloc[4:],tw_news_d24.iloc[1:],tw_news_d25.iloc[1:],tw_news_d28.iloc[1:],tw_news_d29.iloc[1:],tw_news_d30.iloc[1:],tw_news_d31.iloc[1:],tw_news_d32.iloc[1:],tw_news_d35.iloc[1:],tw_news_d36.iloc[1:],tw_news_d37.iloc[1:],tw_news_d38.iloc[1:]
frames = [tw_news_d23_30m,tw_news_d24_30m,tw_news_d25_30m,tw_news_d28_30m,tw_news_d29_30m,tw_news_d30_30m,tw_news_d31_30m,tw_news_d32_30m,tw_news_d35_30m,tw_news_d36_30m,tw_news_d37_30m,tw_news_d38_30m]
processed_tweets = pd.concat(frames)
return processed_tweets
"""2 Modeling Functions:"""
def baseline_model(data):
pred = data['SMA(3)'][3:]
actu = data['Adj Close'][3:]
rmse = np.sqrt(mean_squared_error(actu,pred))
r2_sco = r2_score(actu,pred)
return rmse, r2_sco
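# Price-only models (no sentiment): LinearRegression and SGDRegressor on Adj Close, Scaled Volume and SMA(3), holding out the last few periods as the test set.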
def linear_modeling_no_sentiment(dataframe):
x_var = ['Adj Close','Scaled Volume','SMA(3)']
i = len(dataframe['Percent Price Change Within Period (t+1)'])-4
y_train, y_test = dataframe['Percent Price Change Within Period (t+1)'][3:i], dataframe['Percent Price Change Within Period (t+1)'][i:-1]
X_train, X_test = dataframe[x_var][3:i], dataframe[x_var][i:-1]
lm = LinearRegression()
lm.fit(X_train,y_train)
predictions = lm.predict(X_test)
rmse = np.sqrt(mean_squared_error(y_test,predictions))
r2_sco = r2_score(y_test,predictions)
reg = SGDRegressor(random_state=42)
reg.fit(X_train, y_train)
predictions2 = reg.predict(X_test)
rmse2 = np.sqrt(mean_squared_error(y_test,predictions2))
r2_sco2 = r2_score(y_test,predictions2)
return rmse,r2_sco,rmse2,r2_sco2
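# Headline-sentiment models: add the compound score, its 3-period SMA and change features to the price inputs; evaluates LinearRegression, SGDRegressor, XGBoost and an RBF SVR on the same hold-out split.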
def linear_modeling_headlines(dataframe):
x_var = ['Adj Close','Scaled Volume','compound','Compound SMA(3) Headlines','SMA(3)','change in sentiment headlines','change in sentiment headlines (t-1)']
i = len(dataframe['Percent Price Change Within Period (t+1)'])-4
y_train, y_test = dataframe['Percent Price Change Within Period (t+1)'][:i], dataframe['Percent Price Change Within Period (t+1)'][i:-1]
X_train, X_test = dataframe[x_var][:i], dataframe[x_var][i:-1]
lm = LinearRegression()
lm.fit(X_train,y_train)
predictions = lm.predict(X_test)
rmse = np.sqrt(mean_squared_error(y_test,predictions))
r2_sco = r2_score(y_test,predictions)
reg = SGDRegressor(random_state=42)
reg.fit(X_train, y_train)
predictions2 = reg.predict(X_test)
rmse2 = np.sqrt(mean_squared_error(y_test,predictions2))
r2_sco2 = r2_score(y_test,predictions2)
xg_reg = xgb.XGBRegressor(colsample_bytree= 0.3, gamma= 0.0, learning_rate= 0.2, max_depth= 5, n_estimators= 20000)
# xg_reg = xgb.XGBRegressor(colsample_bytree= 0.4, gamma= 0.4, learning_rate= 0.05, max_depth= 4, n_estimators= 10000)
xg_reg.fit(X_train,y_train)
preds3 = xg_reg.predict(X_test)
rmse3 = np.sqrt(mean_squared_error(y_test, preds3))
r2_sco3 = r2_score(y_test,preds3)
svr = SVR(kernel='rbf', C=0.01, epsilon=0.001)
svr.fit(X_train,y_train)
preds4 = svr.predict(X_test)
rmse4 = np.sqrt(mean_squared_error(y_test,preds4))
r2_sco4 = r2_score(y_test,preds4)
return rmse,r2_sco,rmse2,r2_sco2,rmse3,r2_sco3,rmse4,r2_sco4
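# Twitter-sentiment models: identical setup to the headline version, using the Twitter-derived sentiment features instead.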
def linear_model_twitter(dataframe):
x_var = ['Adj Close','Scaled Volume','compound','Compound SMA(3) Twitter','SMA(3)','change in sentiment twitter','change in sentiment twitter (t-1)']
i = len(dataframe['Percent Price Change Within Period (t+1)'])-4
y_train, y_test = dataframe['Percent Price Change Within Period (t+1)'][:i], dataframe['Percent Price Change Within Period (t+1)'][i:-1]
X_train, X_test = dataframe[x_var][:i], dataframe[x_var][i:-1]
lm = LinearRegression()
lm.fit(X_train,y_train)
predictions = lm.predict(X_test)
rmse = np.sqrt(mean_squared_error(y_test,predictions))
r2_sco = r2_score(y_test,predictions)
reg = SGDRegressor(random_state=42)
reg.fit(X_train, y_train)
predictions2 = reg.predict(X_test)
rmse2 = np.sqrt(mean_squared_error(y_test,predictions2))
r2_sco2 = r2_score(y_test,predictions2)
xg_reg = xgb.XGBRegressor(colsample_bytree= 0.3, gamma= 0.0, learning_rate= 0.2, max_depth= 5, n_estimators= 20000)
# xg_reg = xgb.XGBRegressor(colsample_bytree= 0.4, gamma= 0.4, learning_rate= 0.05, max_depth= 4, n_estimators= 10000)
xg_reg.fit(X_train,y_train)
preds3 = xg_reg.predict(X_test)
rmse3 = np.sqrt(mean_squared_error(y_test, preds3))
r2_sco3 = r2_score(y_test,preds3)
svr = SVR(kernel='rbf', C=0.01, epsilon=0.001)
svr.fit(X_train,y_train)
preds4 = svr.predict(X_test)
rmse4 = np.sqrt(mean_squared_error(y_test,preds4))
r2_sco4 = r2_score(y_test,preds4)
return rmse,r2_sco,rmse2,r2_sco2,rmse3,r2_sco3,rmse4,r2_sco4
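# Combined models: headline and Twitter sentiment features together (the compound_x / compound_y suffixes as produced by a pandas merge); evaluates LinearRegression, SGDRegressor, XGBoost, RandomForestRegressor and an RBF SVR.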
def multi_model_full(dataframe):
x_var = ['Adj Close','Scaled Volume','compound_y','compound_x','Compound SMA(3) Headlines','Compound SMA(3) Twitter','SMA(3)','change in sentiment headlines','change in sentiment headlines (t-1)','change in sentiment twitter','change in sentiment twitter (t-1)']
i = len(dataframe['Percent Price Change Within Period (t+1)'])-4
y_train, y_test = dataframe['Percent Price Change Within Period (t+1)'][:i], dataframe['Percent Price Change Within Period (t+1)'][i:-1]
X_train, X_test = dataframe[x_var][:i], dataframe[x_var][i:-1]
lm = LinearRegression()
lm.fit(X_train,y_train)
predictions = lm.predict(X_test)
rmse = np.sqrt(mean_squared_error(y_test,predictions))
r2_sco = r2_score(y_test,predictions)
reg = SGDRegressor(random_state=42)
reg.fit(X_train, y_train)
predictions2 = reg.predict(X_test)
rmse2 = np.sqrt(mean_squared_error(y_test,predictions2))
r2_sco2 = r2_score(y_test,predictions2)
xg_reg = xgb.XGBRegressor(colsample_bytree= 0.3, gamma= 0.0, learning_rate= 0.2, max_depth= 5, n_estimators= 20000)
xg_reg.fit(X_train,y_train)
preds3 = xg_reg.predict(X_test)
rmse3 = np.sqrt(mean_squared_error(y_test, preds3))
r2_sco3 = r2_score(y_test,preds3)
rf_regr = RandomForestRegressor(n_estimators=20, max_depth=600, random_state=42)
rf_regr.fit(X_train,y_train)
preds4 = rf_regr.predict(X_test)
rmse4 = np.sqrt(mean_squared_error(y_test, preds4))
r2_sco4 = r2_score(y_test,preds4)
svr = SVR(kernel='rbf', C=0.01, epsilon=0.001)
svr.fit(X_train,y_train)
preds5 = svr.predict(X_test)
rmse5 = np.sqrt(mean_squared_error(y_test,preds5))
r2_sco5 = r2_score(y_test,preds5)
return rmse,r2_sco,rmse2,r2_sco2,rmse3,r2_sco3,rmse4,r2_sco4,rmse5,r2_sco5
"""## 2. Evaluate Model with Individual Stocks:"""
def import_data(ticker):
# 1. Historical Stock Data:
stock_df = pd.read_csv('Dataset/1.Stock_Data/'+ticker+'_data.csv', index_col=0, parse_dates=['Datetime'])
stock_df['Percent Price Change Within Period (t+1)'] = stock_df['Percent Price Change Within Period'].shift(-1)
# 2. Headline Data:
    headlines1 = pd.read_csv('Dataset/2.FinViz_Headline_Data/'+ticker+'_2020-09-23_2020-10-07.csv', index_col=0, parse_dates=['date_time'])  # api: pandas.read_csv
from datetime import (
datetime,
timedelta,
timezone,
)
import numpy as np
import pytest
import pytz
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
Timestamp,
date_range,
isna,
)
import pandas._testing as tm
class TestSeriesFillNA:
def test_fillna_nat(self):
series = Series([0, 1, 2, NaT.value], dtype="M8[ns]")
filled = series.fillna(method="pad")
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
tm.assert_series_equal(filled, expected)
tm.assert_series_equal(filled2, expected)
df = DataFrame({"A": series})
filled = df.fillna(method="pad")
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({"A": expected})
tm.assert_frame_equal(filled, expected)
tm.assert_frame_equal(filled2, expected)
series = Series([NaT.value, 0, 1, 2], dtype="M8[ns]")
filled = series.fillna(method="bfill")
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
tm.assert_series_equal(filled, expected)
tm.assert_series_equal(filled2, expected)
df = DataFrame({"A": series})
filled = df.fillna(method="bfill")
filled2 = df.fillna(value=series[1])
expected = DataFrame({"A": expected})
tm.assert_frame_equal(filled, expected)
tm.assert_frame_equal(filled2, expected)
def test_fillna_value_or_method(self, datetime_series):
msg = "Cannot specify both 'value' and 'method'"
with pytest.raises(ValueError, match=msg):
datetime_series.fillna(value=0, method="ffill")
def test_fillna(self):
ts = Series([0.0, 1.0, 2.0, 3.0, 4.0], index=tm.makeDateIndex(5))
tm.assert_series_equal(ts, ts.fillna(method="ffill"))
ts[2] = np.NaN
exp = Series([0.0, 1.0, 1.0, 3.0, 4.0], index=ts.index)
tm.assert_series_equal(ts.fillna(method="ffill"), exp)
exp = Series([0.0, 1.0, 3.0, 3.0, 4.0], index=ts.index)
tm.assert_series_equal(ts.fillna(method="backfill"), exp)
exp = Series([0.0, 1.0, 5.0, 3.0, 4.0], index=ts.index)
tm.assert_series_equal(ts.fillna(value=5), exp)
msg = "Must specify a fill 'value' or 'method'"
with pytest.raises(ValueError, match=msg):
ts.fillna()
def test_fillna_nonscalar(self):
# GH#5703
s1 = Series([np.nan])
s2 = Series([1])
result = s1.fillna(s2)
expected = Series([1.0])
tm.assert_series_equal(result, expected)
result = s1.fillna({})
tm.assert_series_equal(result, s1)
result = s1.fillna(Series((), dtype=object))
tm.assert_series_equal(result, s1)
result = s2.fillna(s1)
tm.assert_series_equal(result, s2)
result = s1.fillna({0: 1})
tm.assert_series_equal(result, expected)
result = s1.fillna({1: 1})
tm.assert_series_equal(result, Series([np.nan]))
result = s1.fillna({0: 1, 1: 1})
tm.assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}))
tm.assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}, index=[4, 5]))
tm.assert_series_equal(result, s1)
def test_fillna_aligns(self):
s1 = Series([0, 1, 2], list("abc"))
s2 = Series([0, np.nan, 2], list("bac"))
result = s2.fillna(s1)
expected = Series([0, 0, 2.0], list("bac"))
tm.assert_series_equal(result, expected)
def test_fillna_limit(self):
ser = Series(np.nan, index=[0, 1, 2])
result = ser.fillna(999, limit=1)
expected = Series([999, np.nan, np.nan], index=[0, 1, 2])
tm.assert_series_equal(result, expected)
result = ser.fillna(999, limit=2)
expected = Series([999, 999, np.nan], index=[0, 1, 2])
tm.assert_series_equal(result, expected)  # api: pandas._testing.assert_series_equal
import json
import pandas as pd
import time
import numpy as np
pd.set_option('display.max_colwidth', None)  # None (rather than the deprecated -1) disables column-width truncation
##GENERAL
import time
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import random
from collections import Counter
import csv
import re
import concurrent.futures
##SPACY
import spacy
nlp = spacy.load('en_core_web_sm')
#nlp = spacy.load('en_core_web_md')
#nlp = spacy.load('en_core_web_lg')
from spacy.matcher import Matcher
from spacy.matcher import PhraseMatcher
from spacy.tokens import Span
from spacy.pipeline import SentenceSegmenter
from spacy import displacy
#DATA LOADING AND PROFILING
##Reading Through Chunk Size Parameter
data_preprocessing_start_time = time.time()
chunk_start=time.time()
path = '/Users/ankitkothari/Documents/ONGOING_PROJECTS/optimum/yelp_academic_dataset_review.json'
chunk_iter = pd.read_json(path, lines=True, chunksize=50000)
chunk_list = []
for chunk in chunk_iter:
chunk_list.append(chunk)
data = pd.concat(chunk_list)
chunk_time = (time.time()-chunk_start)/60
print(f'Time Taken for chunking {chunk_time:03.2f} mins')
print(data.head())
print(f' Memory usage in MB {data.memory_usage(deep=True).sort_values()/(1024*1024)}')
print(f'data types {data.info(memory_usage="deep")}')
print(f'data size {data.size}')
for dtype in ['float','int','object']:
selected_dtype = data.select_dtypes(include=[dtype])
mean_usage_b = selected_dtype.memory_usage(deep=True).mean()
mean_usage_mb = mean_usage_b / 1024 ** 2
print("Average memory usage for {} columns: {:03.2f} MB".format(dtype,mean_usage_mb))
def mem_usage(pandas_obj):
if isinstance(pandas_obj,pd.DataFrame):
usage_b = pandas_obj.memory_usage(deep=True).sum()
else: # we assume if not a df it's a series
usage_b = pandas_obj.memory_usage(deep=True)
usage_mb = usage_b / 1024 ** 2 # convert bytes to megabytes
return "{:03.2f} MB".format(usage_mb)
data_reduced = data.copy()
data_int = data_reduced.select_dtypes(include=['int'])
converted_int = data_int.apply(pd.to_numeric, downcast='signed')
print(mem_usage(data_int))
print(mem_usage(converted_int))
compare_ints = pd.concat([data_int.dtypes, converted_int.dtypes], axis=1)  # api: pandas.concat
import os
import json
import shutil
import pandas as pd
from pprint import pprint
from multiprocessing import Pool
import logging
_logger = logging.getLogger(__name__)
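# Minimal worker: each pool process logs an error message identifying itself, to exercise logging from multiprocessing.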
def worker(i: int):
_logger.error("I am worker "+str(i))
if __name__ == '__main__':
FILE_FORMATTER = "[%(levelname)s] - %(asctime)s - %(name)s - %(message)s"
FILE_LOGGING_LEVEL = logging.INFO
LOG_FILENAME = 'dsbox.log'
logging.basicConfig(
level=FILE_LOGGING_LEVEL,
format=FILE_FORMATTER,
datefmt='%m-%d %H:%M:%S',
filename=LOG_FILENAME,
)
with Pool(4) as p:
p.map(worker, range(4))
def _process_pipeline_submission() -> None:
output_directory = "/dsbox_efs/runs/0807-run3/seed/38_sick"
pipelines_root: str = os.path.join(output_directory, 'pipelines')
executables_root: str = os.path.join(output_directory, 'executables')
supporting_root: str = os.path.join(output_directory, 'supporting_files')
# Read all the json files in the pipelines
    pipelines_name_list = os.listdir(pipelines_root)
    if len(pipelines_name_list) < 20:
        return
    pipelines_df = pd.DataFrame(0.0, index=pipelines_name_list, columns=["rank"])  # api: pandas.DataFrame
#python features.py <limit> <dump file for features>.pkl
#python -W ignore -u features.py 0 ../data/data.pkl
#python -W ignore -u features.py 1035650 ../data/data.pkl
import datetime
import pymongo
import pandas as pd
from math import log
import time
import sys
from scipy.stats import linregress
import pickle
import numpy as np
client = pymongo.MongoClient()
db = client['bitfinex']
timestamp_format = "%Y-%m-%d %H:%M:%S.%f"
def get_formatted_time_string(this_time):
return datetime.datetime.utcfromtimestamp(this_time).strftime(timestamp_format)
def get_book_df(symbol, limit, convert_timestamps=False):
'''
Returns a DataFrame of book data
'''
books_db = db[symbol+'_books']
cursor = books_db.find().sort('_id', -1).limit(limit)
books = pd.DataFrame(list(cursor))
books = books.set_index('_id')
if convert_timestamps:
books.index = pd.to_datetime(books.index, unit='s')
def to_df(x):
return pd.DataFrame(x[:10])
return books.applymap(to_df).sort_index()
def get_width_and_mid(books):
'''
Returns width of best market and midpoint for each data point in DataFrame
of book data
'''
best_bid = books.bids.apply(lambda x: x.price[0])
best_ask = books.asks.apply(lambda x: x.price[0])
return best_ask-best_bid, (best_bid + best_ask)/2
#Since asks/bids seem to be repeating in books for a while, at most (observed so far) every 15 seconds, we want to get the future mid within plus/minus 25 seconds
def get_future_mid(books, offset, sensitivity):
'''
Returns percent change of future midpoints for each data point in DataFrame
of book data
'''
def future(timestamp):
i = books.index.get_loc(timestamp+offset, method='nearest')
if abs(books.index[i] - (timestamp+offset)) < sensitivity:
return books.mid.iloc[i]
return (books.index.map(future)/books.mid).apply(log)
def get_power_imbalance(books, n=10, power=2):
'''
Returns a measure of the imbalance between bids and offers for each data
point in DataFrame of book data
'''
def calc_imbalance(book):
def calc(x):
return 0 if x.price-book.mid==0 else x.amount*(.5*book.width/(x.price-book.mid))**power
bid_imbalance = book.bids.iloc[:n].apply(calc, axis=1)
ask_imbalance = book.asks.iloc[:n].apply(calc, axis=1)
return (bid_imbalance-ask_imbalance).sum()
imbalance = books.apply(calc_imbalance, axis=1)
return imbalance
def get_power_adjusted_price(books, n=10, power=2):
'''
Returns the percent change of an average of order prices weighted by inverse
distance-wieghted volume for each data point in DataFrame of book data
'''
def calc_adjusted_price(book):
def calc(x):
return x.amount*(.5*book.width/(x.price-book.mid))**power
bid_inv = 1/book.bids.iloc[:n].apply(calc, axis=1)
ask_inv = 1/book.asks.iloc[:n].apply(calc, axis=1)
bid_price = book.bids.price.iloc[:n]
ask_price = book.asks.price.iloc[:n]
return (bid_price*bid_inv + ask_price*ask_inv).sum() /\
(bid_inv + ask_inv).sum()
adjusted = books.apply(calc_adjusted_price, axis=1)
return (adjusted/books.mid).apply(log).fillna(0)
def get_trade_df(symbol, min_ts, max_ts, convert_timestamps=False):
'''
Returns a DataFrame of trades for symbol in time range
'''
trades_db = db[symbol+'_trades']
query = {'timestamp': {'$gt': min_ts, '$lt': max_ts}}
cursor = trades_db.find(query).sort('_id', pymongo.ASCENDING)
trades = pd.DataFrame(list(cursor))
if not trades.empty:
trades = trades.set_index('_id')
if convert_timestamps:
            trades.index = pd.to_datetime(trades.index, unit='s')  # api: pandas.to_datetime
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 31 11:31:27 2018
@author: Administrator
"""
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 14 15:29:07 2018
@author: wuxf
"""
import queue
import cx_Oracle
import pandas as pd
import numpy as np
import time
import json
class WorkSheetModelPredict:
"""
工单分布分析类。以一张表为源表,其ID列为源列,对其中每一条记录(以id标记),
查询与其直接或间接关联的表中是否存在关联记录。存在则标记1,不存在则标记0.
由此,对于源表中的每一条记录,计算出一个向量,其长度为输入表的个数,每个
分量为上述标记。
对于源表中的所有记录,求出上述向量,然后按唯一值分组,最后求出百分比,
作为分布描述。
"""
## Read each configured table from its Oracle database and store it in dataMap, keyed by table name
def getDataMap(self, config):
dataMap = {}
for i in range(len(config)):
user = config[i]['user']
pwd = config[i]['password']
host = config[i]['host']
port = config[i]['port']
db = config[i]['database']
url = host + ':' + port + '/' + db
conn = cx_Oracle.connect(user, pwd, url)
for j in range(len(config[i]['tables'])):
key = config[i]['tables'][j]
value = pd.read_sql('select * from ' + key, conn)
if len(value) > 0:
dataMap[key] = value
print('Finished reading data from the databases...')
return dataMap
# Using the stored predecessor (pre) results for each table, recover the connectivity path
# from the source table to the target table. Each table is identified by the index of its name in tableNames.
def getPath(self, pres, end, path):
if pres[end] == -1:
path.append(end)
return path
else:
path.append(end)
return self.getPath(pres, pres[end], path)
# Analyse the connectivity from start to end, then obtain the connecting path:
# if connected, call getPath() to build the path; otherwise return [].
def getRoad(self, mat, start, end):
n = len(mat)
visited = [0 for x in range(n)]
pres = [0 for x in range(n)]
flag = 0
que = queue.Queue()
que.put(start)
visited[start] = 1
pres[start] = -1
while (que.empty() == False):
front = que.get()
for i in range(n):
if (mat[front][i] == 1 and visited[i] == 0):
visited[i] = 1
que.put(i)
pres[i] = front
if (i == end):
flag = 1
break
if (flag == 0):
return []
else:
res = self.getPath(pres, end, [])
res.reverse()
return res
# With a fixed start node as the origin, compute the connecting paths to all nodes
def getAllRoad(self, mat, start):
nodeNum = len(mat)
res = []
for i in range(nodeNum):
curRoad = self.getRoad(mat, start, i)
res.append(curRoad)
return res
def getRelaName(self, tableNames, relaMap):
tableNum = len(tableNames)
relaMat = [[0 for p in range(tableNum)] for q in range(tableNum)]
single = []
relaname = {}
relation = {}
k = 0
for key in relaMap:
t1 = key[0]
t2 = key[1]
ind1 = tableNames.index(t1)
ind2 = tableNames.index(t2)
relaMat[ind1][ind2] = 1
relaMat[ind2][ind1] = 1
relaMat1 = pd.DataFrame(relaMat)
for i in range(len(relaMat1)):
if (np.mean(relaMat1.loc[:, i]) == 0):
single.append(tableNames[i])
else:
x = relaMat1.loc[i + 1:, i][relaMat1.loc[i + 1:, i] == 1]
index = list(x.index)
index.append(i)
relation[k] = index
k = k + 1
for t in range(k):
for l in range(k):
if (t != l):
a = relation[t]
b = relation[l]
if (len(list(set(a).intersection(set(b)))) > 0):
for i in range(len(relation[l])):
relation[t].append(relation[l][i])
relation[t] = list(set(relation[t]))
# ralation.pop(l)
relation[l] = []
for i in range(k):
if relation[i] == []:
relation.pop(i)
n = len(relation)
for i in relation.keys():
relaname[i] = []
for y in relation[i]:
relaname[i].append(tableNames[y])
for j in range(len(single)):
relaname[single[j]] = single[j]
return relaname
# Build the adjacency matrix from the ordered table-name list and the table-adjacency relations
def getRalationMat(self, tableNames, relaMap):
tableNum = len(tableNames)
relaMat = [[0 for p in range(tableNum)] for q in range(tableNum)]
for key in relaMap:
t1 = key[0]
t2 = key[1]
if t1 in tableNames:
if t2 in tableNames:
ind1 = tableNames.index(t1)
ind2 = tableNames.index(t2)
relaMat[ind1][ind2] = 1
relaMat[ind2][ind1] = 1
return relaMat
# Given a predecessor table and some records, check whether its directly related (adjacent) table contains related records, and return them
def getRecordValues(self, df1, valueCol1, values1, conCol1, df2, conCol2):
newDf1 = df1[df1[valueCol1].isin(values1)]
conValues1 = newDf1[conCol1].tolist()
newDf2 = df2[df2[conCol2].isin(conValues1)]
return newDf2[conCol2].tolist()
# From the dict of table relations, given two table names, return their respective join columns
def getConCols(self, relaMap, tableName1, tableName2):
col1 = ""
col2 = ""
if (tableName1, tableName2) in relaMap:
cols = relaMap[tableName1, tableName2]
col1 = cols[0]
col2 = cols[1]
else:
cols = relaMap[tableName2, tableName1]
col1 = cols[1]
col2 = cols[0]
return [col1, col2]
# Starting from the source table, determine whether the end table of a given path contains records related to the source record
def hasRecord(self, dataMap, relaMap, tableNames, road, orientCol, orientId):
roadLen = len(road)
tableName1 = tableNames[road[0]]
tableName2 = tableNames[road[1]]
[col1, col2] = self.getConCols(relaMap, tableName1, tableName2)
df1 = dataMap[tableName1]
df2 = dataMap[tableName2]
values = self.getRecordValues(df1, orientCol, [orientId], col1, df2, col2)
if (roadLen == 2):
if (len(values) == 0):
return 0
else:
return 1
else:
for i in range(1, roadLen - 1):
tableName1 = tableNames[road[i]]
tableName2 = tableNames[road[i + 1]]
orientCol = col2
[col1, col2] = self.getConCols(relaMap, tableName1, tableName2)
df1 = dataMap[tableName1]
df2 = dataMap[tableName2]
values = self.getRecordValues(df1, orientCol, values, col1, df2, col2)
if (len(values) == 0):
return 0
return 1
# Compute the full marker vector
def getVector(self, dataMap, tableNames, relaMap, relaMat, orientTable, orientCol, orientId):
start = tableNames.index(orientTable)
tableNum = len(tableNames)
roads = self.getAllRoad(relaMat, start)
vector = []
for i in range(tableNum):
if (i == start):
vector.append(1)
elif (len(roads[i]) == 0):
vector.append(0)
else:
curFlag = self.hasRecord(dataMap, relaMap, tableNames, roads[i], orientCol, orientId)
vector.append(curFlag)
return vector
# Compute the distribution of vector patterns
def getModePercents(self, vectors):
distinctList = []
counts = []
for term in vectors:
if (term not in distinctList):
distinctList.append(term)
counts.append(1)
else:
ind = distinctList.index(term)
counts[ind] += 1
totalNum = len(vectors)
res = []
for x in range(len(distinctList)):
curPercent = round(counts[x] * 100.0 / totalNum, 4)
res.append((distinctList[x], counts[x], curPercent))
return res
# Tie the methods above together into a single public entry point;
# callers only need to invoke this method to use the class
def getDistribution(self, dataMap, relaMap):
tableNames = []
for i in dataMap.keys():
tableNames.append(i)
relaname = self.getRelaName(tableNames, relaMap)
        df_Vector = pd.DataFrame()  # api: pandas.DataFrame
# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department
# Distributed under the terms of "New BSD License", see the LICENSE file.
import pyiron_base
from pyiron_base._tests import TestWithCleanProject, PyironTestCase
from pyiron_base.generic.datacontainer import DataContainer
from pyiron_base.generic.hdfstub import HDFStub
from pyiron_base.generic.inputlist import InputList
from collections.abc import Iterator
import copy
import os
import unittest
import warnings
import h5py
import numpy as np
import pandas as pd
class Sub(DataContainer):
def __init__(self, init=None, table_name=None, lazy=False, wrap_blacklist=()):
super().__init__(init=init, table_name=table_name, lazy=lazy, wrap_blacklist=())
self.foo = 42
class TestDataContainer(TestWithCleanProject):
@property
def docstring_module(self):
return pyiron_base.generic.datacontainer
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.pl = DataContainer([
{"foo": "bar"},
2,
42,
{"next": [
0,
{"depth": 23}
]}
], table_name="input")
cls.pl["tail"] = DataContainer([2, 4, 8])
cls.hdf = cls.project.create_hdf(cls.project.path, "test")
# Init tests
def test_init_none(self):
pl = DataContainer()
self.assertEqual(len(pl), 0, "not empty after initialized with None")
def test_init_list(self):
l = [1, 2, 3, 4]
pl = DataContainer(l)
self.assertEqual(len(pl), len(l), "not the same length as source list")
self.assertEqual(list(pl.values()), l, "conversion to list not the same as source list")
def test_init_tuple(self):
t = (1, 2, 3, 4)
pl = DataContainer(t)
self.assertEqual(len(pl), len(t), "not the same length as source tuple")
self.assertEqual(tuple(pl.values()), t, "conversion to tuple not the same as source tuple")
def test_init_set(self):
s = {1, 2, 3, 4}
pl = DataContainer(s)
self.assertEqual(len(pl), len(s), "not the same length as source set")
self.assertEqual(set(pl.values()), s, "conversion to set not the same as source set")
def test_init_dict(self):
d = {"foo": 23, "test case": "bar"}
pl = DataContainer(d)
self.assertEqual(tuple(pl.items()), tuple(d.items()), "source dict items not preserved")
with self.assertRaises(ValueError, msg="no ValueError on invalid initializer"):
DataContainer({2: 0, 1: 1})
# access tests
def test_get_nested(self):
n = [
{"foo": "bar"},
2,
42,
{"next":
[0,
{"depth": 23}
]
}
]
pl = DataContainer(n)
self.assertEqual(type(pl[0]), DataContainer, "nested dict not converted to DataContainer")
self.assertEqual(type(pl["3/next"]), DataContainer, "nested list not converted to DataContainer")
self.assertEqual(type(pl["0/foo"]), str, "nested str converted to DataContainer")
def test_get_item(self):
self.assertEqual(self.pl[0], {"foo": "bar"}, "index with integer does not give correct element")
self.assertEqual(self.pl[0]["foo"], "bar", "index with string does not give correct element")
with self.assertRaises(IndexError, msg="no IndexError on out of bounds index"):
print(self.pl[15])
with self.assertRaises(ValueError, msg="no ValueError on invalid index type"):
print(self.pl[{}])
def test_search(self):
self.assertEqual(self.pl.search("depth"), 23, "search does not give correct element")
with self.assertRaises(KeyError, msg="search: no IndexError on inexistent key"):
print(self.pl.search("inexistent_key"))
with self.assertRaises(TypeError, msg="search: no TypeError if key is not a string"):
print(self.pl.search(0.0))
# test if '...' in slash-notation triggers search
self.assertEqual(self.pl[".../depth"], 23, "'.../' in key does not trigger search")
self.pl["next/foo/bar/depth"] = 23
# test if .../ works in setting when search is intermediate (some more items follow)
self.pl[".../bar/extra"] = "stuff"
self.assertEqual(self.pl["next/foo/bar/extra"], "stuff", "'.../' in setitem does not work (intermediate item search)")
# test if .../ works in setting when search is final (no more items follow)
self.pl[".../extra"] = "other"
self.assertEqual(self.pl["next/foo/bar/extra"], "other", "'.../' in setitem does not work (final item search)")
# test errors for multiple keys
with self.assertRaises(ValueError, msg="search: no ValueError on multiple keys"):
print(self.pl.search("depth", False))
with self.assertRaises(ValueError, msg="search: no ValueError on multiple keys"):
print(self.pl[".../depth"])
# test errors for deletion
del self.pl[".../bar/depth"]
with self.assertRaises(KeyError, msg="search: '.../' in del does not work (intermediate item search)"):
print(self.pl["next/foo/bar/depth"])
del self.pl[".../bar"]
with self.assertRaises(KeyError, msg="search: '.../' in del does not work (final item search)"):
print(self.pl["next/foo/bar"])
def test_get_attr(self):
self.assertEqual(self.pl.tail, DataContainer([2, 4, 8]), "attribute access does not give correct element")
self.assertEqual(
self.pl[3].next,
DataContainer([0, DataContainer({"depth": 23})]),
"nested attribute access does not give correct element"
)
def test_get_sempath(self):
self.assertEqual(self.pl["0"], {"foo": "bar"}, "decimal string not converted to integer")
self.assertEqual(self.pl["0/foo"], "bar", "nested access does not give correct element")
self.assertEqual(self.pl["3/next/1/depth"], 23, "nested access does not give correct element")
self.assertEqual(self.pl["3/next/0"], 0, "nested access does not give correct element")
self.assertEqual(
self.pl["3/next/1/depth"],
self.pl[3, "next", 1, "depth"],
"access via semantic path and tuple not the same"
)
def test_get_string_int(self):
self.assertEqual(self.pl[0], self.pl["0"], "access via index and digit-only string not the same")
def test_set_item(self):
self.pl[1] = 4
self.assertEqual(self.pl[1], 4, "setitem does not properly set value on int index")
self.pl[1] = 2
self.pl[0, "foo"] = "baz"
self.assertEqual(self.pl[0, "foo"], "baz", "setitem does not properly set value on tuple index")
self.pl[0, "foo"] = "bar"
def test_set_errors(self):
with self.assertRaises(IndexError, msg="no IndexError on out of bounds index"):
self.pl[15] = 42
with self.assertRaises(ValueError, msg="no ValueError on invalid index type"):
self.pl[{}] = 42
def test_set_some_keys(self):
pl = DataContainer([1, 2])
pl["end"] = 3
self.assertEqual(pl, DataContainer({0: 1, 1: 2, "end": 3}))
def test_set_append(self):
pl = DataContainer()
# should not raise and exception
pl[0] = 1
pl[1] = 2
self.assertEqual(pl[0], 1, "append via index broken on empty list")
self.assertEqual(pl[1], 2, "append via index broken on non-empty list")
pl.append([])
self.assertTrue(isinstance(pl[-1], list), "append wraps sequences as DataContainer, but should not")
pl.append({})
self.assertTrue(isinstance(pl[-1], dict), "append wraps mappings as DataContainer, but should not")
def test_update(self):
pl = DataContainer()
d = self.pl.to_builtin()
pl.update(d, wrap=True)
self.assertEqual(pl, self.pl, "update from to_builtin does not restore list")
with self.assertRaises(ValueError, msg="no ValueError on invalid initializer"):
pl.update("asdf")
pl = self.pl.copy()
pl.update({}, pyiron="yes", test="case")
self.assertEqual((pl.pyiron, pl.test), ("yes", "case"), "update via kwargs does not set values")
pl.clear()
d = {"a": 0, "b": 1, "c": 2}
pl.update(d)
self.assertEqual(dict(pl), d, "update without options does not call generic method")
def test_update_blacklist(self):
"""Wrapping nested mapping should only apply to types not in the blacklist."""
pl = DataContainer()
pl.update([ {"a": 1, "b": 2}, [{"c": 3, "d": 4}] ], wrap=True, blacklist=(dict,))
self.assertTrue(isinstance(pl[0], dict), "nested dict wrapped, even if black listed")
self.assertTrue(isinstance(pl[1][0], dict), "nested dict wrapped, even if black listed")
pl.clear()
pl.update({"a": [1, 2, 3], "b": {"c": [4, 5, 6]}}, wrap=True, blacklist=(list,))
self.assertTrue(isinstance(pl.a, list), "nested list wrapped, even if black listed")
self.assertTrue(isinstance(pl.b.c, list), "nested list wrapped, even if black listed")
pl.clear()
def test_wrap_hdf(self):
"""DataContainer should be able to be initialized by HDF objects."""
h = self.project.create_hdf(self.project.path, "wrap_test")
h["foo"] = 42
h.create_group("bar")["test"] = 23
h["bar"].create_group("nested")["test"] = 23
d = DataContainer(h)
self.assertTrue(isinstance(d.bar, DataContainer),
"HDF group not wrapped from ProjectHDFio.")
self.assertTrue(isinstance(d.bar.nested, DataContainer),
"Nested HDF group not wrapped from ProjectHDFio.")
self.assertEqual(d.foo, 42, "Top-level node not correctly wrapped from ProjectHDFio.")
self.assertEqual(d.bar.test, 23, "Nested node not correctly wrapped from ProjectHDFio.")
self.assertEqual(d.bar.nested.test, 23, "Nested node not correctly wrapped from ProjectHDFio.")
h = h5py.File(h.file_name)
d = DataContainer(h)
self.assertTrue(isinstance(d.wrap_test.bar, DataContainer),
"HDF group not wrapped from h5py.File.")
self.assertTrue(isinstance(d.wrap_test.bar.nested, DataContainer),
"Nested HDF group not wrapped from h5py.File.")
self.assertEqual(d.wrap_test.foo, h["wrap_test/foo"],
"Top-level node not correctly wrapped from h5py.File.")
self.assertEqual(d.wrap_test.bar.test, h["wrap_test/bar/test"],
"Nested node not correctly wrapped from h5py.File.")
self.assertEqual(d.wrap_test.bar.nested.test, h["wrap_test/bar/nested/test"],
"Nested node not correctly wrapped from h5py.File.")
def test_extend(self):
pl = DataContainer()
pl.extend([1, 2, 3])
self.assertEqual(list(pl.values()), [1, 2, 3], "extend from list does not set values")
def test_insert(self):
pl = DataContainer([1, 2, 3])
pl.insert(1, 42, key="foo")
self.assertTrue(pl[0] == 1 and pl[1] == 42 and pl[2] == 2, "insert does not properly set value")
pl.insert(1, 24, key="bar")
self.assertTrue(pl[0] == 1 and pl.bar == 24 and pl.foo == 42, "insert does not properly update keys")
pl.insert(10, 4)
self.assertEqual(pl[-1], 4, "insert does not handle out of bounds gracefully")
def test_mark(self):
pl = DataContainer([1, 2, 3])
pl.mark(1, "foo")
self.assertEqual(pl[1], pl.foo, "marked element does not refer to correct element")
pl.mark(2, "foo")
self.assertEqual(pl[2], pl.foo, "marking with existing key broken")
with self.assertRaises(IndexError, msg="no IndexError on invalid index"):
pl.mark(10, "foo")
def test_deep_copy(self):
pl = self.pl.copy()
self.assertTrue(pl is not self.pl, "deep copy returns same object")
self.assertTrue(
all(
pl[k1] is not self.pl[k2]
for k1, k2 in zip(pl, self.pl)
# int/str may be interned by python and always the same
# object when equal, so exclude from the check
if not isinstance(pl[k1], (int, str))),
"not a deep copy"
)
self.assertTrue(
all(
(k1 == k2) and (pl[k1] == self.pl[k2])
for k1, k2 in zip(pl, self.pl)),
"copy not equal to original"
)
def test_shallow_copy(self):
pl = copy.copy(self.pl)
self.assertTrue(pl is not self.pl, "shallow copy returns same object")
self.assertTrue(
all(
(k1 is k2) and (pl[k1] is self.pl[k2])
for k1, k2 in zip(pl, self.pl)),
"not a shallow copy"
)
self.assertTrue(
all(
(k1 == k2) and (pl[k1] == self.pl[k2])
for k1, k2 in zip(pl, self.pl)),
"copy not equal to original"
)
def test_del_item(self):
pl = DataContainer({0: 1, "a": 2, "foo": 3})
with self.assertRaises(ValueError, msg="no ValueError on invalid index type"):
del pl[{}]
del pl["a"]
self.assertTrue("a" not in pl, "delitem does not delete with str key")
del pl[0]
self.assertTrue(pl[0] != 1, "delitem does not delete with index")
def test_del_attr(self):
class SubDataContainer(DataContainer):
def __init__(self):
object.__setattr__(self, "attr", 42)
s = SubDataContainer()
del s.attr
self.assertFalse(hasattr(s, "attr"), "delattr does not work with instance attributes")
def test_numpy_array(self):
pl = DataContainer([1, 2, 3])
self.assertTrue((np.array(pl) == np.array([1, 2, 3])).all(), "conversion to numpy array broken")
def test_repr_json(self):
def rec(m):
"""
Small helper to recurse through nested lists/dicts and check if all
keys and values are strings. This should be the output format of
_repr_json_
"""
if isinstance(m, list):
for v in m:
if isinstance(v, (list, dict)):
if not rec(v):
return False
elif not isinstance(v, str):
return False
elif isinstance(m, dict):
for k, v in m.items():
if not isinstance(k, str):
return False
if isinstance(v, (list, dict)):
if not rec(v):
return False
elif not isinstance(v, str):
return False
return True
self.assertTrue(rec(self.pl._repr_json_()), "_repr_json_ output not all str")
def test_create_group(self):
"""create_group should not erase existing groups."""
cont = DataContainer()
sub1 = cont.create_group("sub")
self.assertTrue(isinstance(sub1, DataContainer), "create_group doesn't return DataContainer")
sub1.foo = 42
sub2 = cont.create_group("sub")
self.assertEqual(sub1.foo, sub2.foo, "create_group overwrites existing data.")
self.assertTrue(sub1 is sub2, "create_group return new DataContainer group instead of existing one.")
with self.assertRaises(ValueError, msg="No ValueError on existing data in Container"):
sub1.create_group("foo")
def test_to_hdf_type(self):
"""Should write correct type information."""
self.pl.to_hdf(hdf=self.hdf)
self.assertEqual(self.hdf["input/NAME"], "DataContainer")
self.assertEqual(self.hdf["input/OBJECT"], "DataContainer")
self.assertEqual(self.hdf["input/TYPE"], "<class 'pyiron_base.generic.datacontainer.DataContainer'>")
h = self.hdf.open('nested')
pl = DataContainer(self.pl)
pl.to_hdf(hdf=h)
self.assertEqual(h["NAME"], "DataContainer")
self.assertEqual(h["OBJECT"], "DataContainer")
self.assertEqual(h["TYPE"], "<class 'pyiron_base.generic.datacontainer.DataContainer'>")
def test_to_hdf_items(self):
"""Should write all sublists to HDF groups and simple items to HDF datasets."""
self.pl.to_hdf(hdf=self.hdf)
for i, (k, v) in enumerate(self.pl.items()):
k = "{}__index_{}".format(k if isinstance(k, str) else "", i)
if isinstance(v, DataContainer):
self.assertTrue(k in self.hdf["input"].list_groups(), "Sublist '{}' not a sub group in hdf!".format(k))
else:
self.assertTrue(k in self.hdf["input"].list_nodes(), "Item '{}' not a dataset in hdf!".format(k))
def test_to_hdf_name(self):
"""Should raise error if clashing names are given."""
with self.assertRaises(ValueError, msg="Cannot have names clashing with index mangling."):
DataContainer({'__index_0': 42}).to_hdf(hdf=self.hdf)
def test_to_hdf_group(self):
"""Should be possible to give a custom group name."""
self.pl.to_hdf(hdf=self.hdf, group_name="test_group")
self.assertEqual(self.hdf["test_group/NAME"], "DataContainer")
self.assertEqual(self.hdf["test_group/TYPE"], "<class 'pyiron_base.generic.datacontainer.DataContainer'>")
self.assertEqual(self.hdf["test_group/OBJECT"], "DataContainer")
def test_to_hdf_readonly(self):
"""Read-only property should be stored."""
self.pl.to_hdf(hdf=self.hdf, group_name="read_only_f")
self.assertTrue("READ_ONLY" in self.hdf["read_only_f"].list_nodes(), "read-only parameter not saved in HDF")
self.assertEqual(
self.pl.read_only,
self.hdf[self.pl.table_name]["READ_ONLY"],
"read-only parameter not correctly written to HDF"
)
pl = self.pl.copy()
pl.read_only = True
pl.to_hdf(hdf=self.hdf, group_name="read_only_t")
self.assertEqual(
pl.read_only,
self.hdf["read_only_t/READ_ONLY"],
"read-only parameter not correctly written to HDF"
)
def test_from_hdf(self):
"""Reading from HDF should give back the same list as written."""
self.pl.to_hdf(hdf=self.hdf)
l = DataContainer(table_name="input")
l.from_hdf(hdf=self.hdf)
self.assertEqual(self.pl, l)
def test_from_hdf_group(self):
"""Reading from HDF should give back the same list as written even with custom group name."""
self.pl.to_hdf(hdf=self.hdf, group_name="test_group")
l = DataContainer(table_name="input")
l.from_hdf(hdf=self.hdf, group_name="test_group")
self.assertEqual(self.pl, l)
def test_from_hdf_readonly(self):
"""Reading from HDF should restore the read-only property."""
self.pl.to_hdf(hdf=self.hdf, group_name="read_only_from")
pl = DataContainer()
pl.from_hdf(self.hdf, group_name="read_only_from")
self.assertEqual(
pl.read_only,
self.hdf["read_only_from/READ_ONLY"],
"read-only parameter not correctly read from HDF"
)
self.hdf["read_only_from/READ_ONLY"] = True
with warnings.catch_warnings(record=True) as w:
pl.from_hdf(self.hdf, group_name="read_only_from")
self.assertEqual(len(w), 0, "from_hdf on read_only DataContainer should not call _read_only_error.")
self.assertEqual(
pl.read_only,
self.hdf["read_only_from/READ_ONLY"],
"read-only parameter not correctly read from HDF"
)
def test_hdf_complex_members(self):
"""Values that implement to_hdf/from_hdf, should write themselves to the HDF file correctly."""
pl = DataContainer(table_name="complex")
pl.append(self.project.create_job(self.project.job_type.ScriptJob, "dummy1"))
pl.append(self.project.create_job(self.project.job_type.ScriptJob, "dummy2"))
pl.append(42)
pl["foo"] = "bar"
pl.to_hdf(hdf=self.hdf)
pl2 = self.hdf["complex"].to_object()
self.assertEqual(type(pl[0]), type(pl2[0]))
self.assertEqual(type(pl[1]), type(pl2[1]))
def test_hdf_empty_group(self):
"""Writing a list without table_name or group_name should only work if the HDF group is empty."""
l = DataContainer([1, 2, 3])
with self.assertRaises(ValueError, msg="No exception when writing to full hdf group."):
l.to_hdf(self.hdf)
h = self.hdf.create_group("empty_group")
l.to_hdf(h)
self.assertEqual(l, h.to_object())
def test_hdf_empty_list(self):
"""Writing and reading an empty list should work."""
l = DataContainer(table_name="empty_list")
l.to_hdf(self.hdf)
l.from_hdf(self.hdf)
self.assertEqual(len(l), 0, "Empty list read from HDF not empty.")
def test_hdf_no_wrap(self):
"""Nested mappings should not be wrapped as DataContainer after reading."""
l = DataContainer(table_name="mappings")
l.append({"foo": "bar"})
l.append([1, 2, 3])
l.to_hdf(self.hdf)
m = l.copy()
m.from_hdf(self.hdf, group_name="mappings")
self.assertEqual(l, m, "List with nested mappings not restored from HDF.")
self.assertTrue(isinstance(m[0], dict), "dicts wrapped after reading from HDF.")
self.assertTrue(isinstance(m[1], list), "lists wrapped after reading from HDF.")
def test_hdf_pandas(self):
"""Values that implement to_hdf/from_hdf, should write themselves to the HDF file correctly."""
pl = DataContainer(table_name="pandas")
        pl.append(pd.DataFrame({"a": [1, 2], "b": ["x", "y"]}))  # api: pandas.DataFrame
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2022/2/2 23:26
Desc: Eastmoney (东方财富网) - quotes homepage - Shanghai/Shenzhen/Beijing A shares
"""
import requests
import pandas as pd
def stock_zh_a_spot_em() -> pd.DataFrame:
"""
Eastmoney - Shanghai/Shenzhen/Beijing A shares - real-time quotes
http://quote.eastmoney.com/center/gridlist.html#hs_a_board
:return: real-time quotes
:rtype: pandas.DataFrame
"""
url = "http://82.push2.eastmoney.com/api/qt/clist/get"
params = {
"pn": "1",
"pz": "5000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:0 t:6,m:0 t:80,m:1 t:2,m:1 t:23,m:0 t:81 s:2048",
"fields": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f12,f13,f14,f15,f16,f17,f18,f20,f21,f23,f24,f25,f22,f11,f62,f128,f136,f115,f152",
"_": "1623833739532",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["diff"]:
return pd.DataFrame()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df.columns = [
"_",
"最新价",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"换手率",
"市盈率-动态",
"量比",
"_",
"代码",
"_",
"名称",
"最高",
"最低",
"今开",
"昨收",
"_",
"_",
"_",
"市净率",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
]
temp_df.reset_index(inplace=True)
temp_df["index"] = temp_df.index + 1
temp_df.rename(columns={"index": "序号"}, inplace=True)
temp_df = temp_df[
[
"序号",
"代码",
"名称",
"最新价",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"最高",
"最低",
"今开",
"昨收",
"量比",
"换手率",
"市盈率-动态",
"市净率",
]
]
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"], errors="coerce")
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"], errors="coerce")
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"], errors="coerce")
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"], errors="coerce")
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"], errors="coerce")
temp_df["振幅"] = pd.to_numeric(temp_df["振幅"], errors="coerce")
temp_df["最高"] = pd.to_numeric(temp_df["最高"], errors="coerce")
temp_df["最低"] = pd.to_numeric(temp_df["最低"], errors="coerce")
temp_df["今开"] = pd.to_numeric(temp_df["今开"], errors="coerce")
temp_df["昨收"] = pd.to_numeric(temp_df["昨收"], errors="coerce")
temp_df["量比"] = pd.to_numeric(temp_df["量比"], errors="coerce")
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"], errors="coerce")
temp_df["市盈率-动态"] = pd.to_numeric(temp_df["市盈率-动态"], errors="coerce")
temp_df["市净率"] = pd.to_numeric(temp_df["市净率"], errors="coerce")
return temp_df
def stock_zh_b_spot_em() -> pd.DataFrame:
"""
Eastmoney - B shares - real-time quotes
http://quote.eastmoney.com/center/gridlist.html#hs_a_board
:return: real-time quotes
:rtype: pandas.DataFrame
"""
url = "http://28.push2.eastmoney.com/api/qt/clist/get"
params = {
"pn": "1",
"pz": "5000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:0 t:7,m:1 t:3",
"fields": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f12,f13,f14,f15,f16,f17,f18,f20,f21,f23,f24,f25,f22,f11,f62,f128,f136,f115,f152",
"_": "1623833739532",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["diff"]:
return pd.DataFrame()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df.columns = [
"_",
"最新价",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"换手率",
"市盈率-动态",
"量比",
"_",
"代码",
"_",
"名称",
"最高",
"最低",
"今开",
"昨收",
"_",
"_",
"_",
"市净率",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
]
temp_df.reset_index(inplace=True)
temp_df["index"] = range(1, len(temp_df) + 1)
temp_df.rename(columns={"index": "序号"}, inplace=True)
temp_df = temp_df[
[
"序号",
"代码",
"名称",
"最新价",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"最高",
"最低",
"今开",
"昨收",
"量比",
"换手率",
"市盈率-动态",
"市净率",
]
]
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"], errors="coerce")
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"], errors="coerce")
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"], errors="coerce")
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"], errors="coerce")
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"], errors="coerce")
temp_df["振幅"] = pd.to_numeric(temp_df["振幅"], errors="coerce")
temp_df["最高"] = pd.to_numeric(temp_df["最高"], errors="coerce")
temp_df["最低"] = pd.to_numeric(temp_df["最低"], errors="coerce")
temp_df["今开"] = pd.to_numeric(temp_df["今开"], errors="coerce")
temp_df["昨收"] = pd.to_numeric(temp_df["昨收"], errors="coerce")
temp_df["量比"] = pd.to_numeric(temp_df["量比"], errors="coerce")
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"], errors="coerce")
temp_df["市盈率-动态"] = pd.to_numeric(temp_df["市盈率-动态"], errors="coerce")
temp_df["市净率"] = pd.to_numeric(temp_df["市净率"], errors="coerce")
return temp_df
def code_id_map_em() -> dict:
"""
Eastmoney - stock codes and their market IDs
http://quote.eastmoney.com/center/gridlist.html#hs_a_board
:return: mapping from stock code to market ID
:rtype: dict
"""
url = "http://80.push2.eastmoney.com/api/qt/clist/get"
params = {
"pn": "1",
"pz": "5000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:1 t:2,m:1 t:23",
"fields": "f12",
"_": "1623833739532",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["diff"]:
return dict()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df["market_id"] = 1
temp_df.columns = ["sh_code", "sh_id"]
code_id_dict = dict(zip(temp_df["sh_code"], temp_df["sh_id"]))
params = {
"pn": "1",
"pz": "5000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:0 t:6,m:0 t:80",
"fields": "f12",
"_": "1623833739532",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["diff"]:
return dict()
temp_df_sz = pd.DataFrame(data_json["data"]["diff"])
temp_df_sz["sz_id"] = 0
code_id_dict.update(dict(zip(temp_df_sz["f12"], temp_df_sz["sz_id"])))
params = {
"pn": "1",
"pz": "5000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:0 t:81 s:2048",
"fields": "f12",
"_": "1623833739532",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["diff"]:
return dict()
temp_df_sz = pd.DataFrame(data_json["data"]["diff"])
temp_df_sz["bj_id"] = 0
code_id_dict.update(dict(zip(temp_df_sz["f12"], temp_df_sz["bj_id"])))
return code_id_dict
def stock_zh_a_hist(
symbol: str = "000001",
period: str = "daily",
start_date: str = "19700101",
end_date: str = "20500101",
adjust: str = "",
) -> pd.DataFrame:
"""
Eastmoney - quotes homepage - Shanghai/Shenzhen/Beijing A shares - daily bars
http://quote.eastmoney.com/concept/sh603777.html?from=classic
:param symbol: stock code
:type symbol: str
:param period: choice of {'daily', 'weekly', 'monthly'}
:type period: str
:param start_date: start date
:type start_date: str
:param end_date: end date
:type end_date: str
:param adjust: choice of {"qfq": "forward-adjusted", "hfq": "back-adjusted", "": "not adjusted"}
:type adjust: str
:return: daily bars
:rtype: pandas.DataFrame
"""
code_id_dict = code_id_map_em()
adjust_dict = {"qfq": "1", "hfq": "2", "": "0"}
period_dict = {"daily": "101", "weekly": "102", "monthly": "103"}
url = "http://push2his.eastmoney.com/api/qt/stock/kline/get"
params = {
"fields1": "f1,f2,f3,f4,f5,f6",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58,f59,f60,f61,f116",
"ut": "7eea3edcaed734bea9cbfc24409ed989",
"klt": period_dict[period],
"fqt": adjust_dict[adjust],
"secid": f"{code_id_dict[symbol]}.{symbol}",
"beg": start_date,
"end": end_date,
"_": "1623766962675",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["klines"]:
return pd.DataFrame()
temp_df = pd.DataFrame(
[item.split(",") for item in data_json["data"]["klines"]]
)
temp_df.columns = [
"日期",
"开盘",
"收盘",
"最高",
"最低",
"成交量",
"成交额",
"振幅",
"涨跌幅",
"涨跌额",
"换手率",
]
temp_df.index = pd.to_datetime(temp_df["日期"])
temp_df.reset_index(inplace=True, drop=True)
temp_df["开盘"] = pd.to_numeric(temp_df["开盘"])
temp_df["收盘"] = pd.to_numeric(temp_df["收盘"])
temp_df["最高"] = pd.to_numeric(temp_df["最高"])
temp_df["最低"] = pd.to_numeric(temp_df["最低"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["振幅"] = pd.to_numeric(temp_df["振幅"])
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"])
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"])
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"])
return temp_df
def stock_zh_a_hist_min_em(
symbol: str = "000001",
start_date: str = "1979-09-01 09:32:00",
end_date: str = "2222-01-01 09:32:00",
period: str = "5",
adjust: str = "",
) -> pd.DataFrame:
"""
Eastmoney - quotes homepage - Shanghai/Shenzhen/Beijing A shares - intraday bars
http://quote.eastmoney.com/concept/sh603777.html?from=classic
:param symbol: stock code
:type symbol: str
:param start_date: start date
:type start_date: str
:param end_date: end date
:type end_date: str
:param period: choice of {'1', '5', '15', '30', '60'}
:type period: str
:param adjust: choice of {'', 'qfq', 'hfq'}
:type adjust: str
:return: intraday bars
:rtype: pandas.DataFrame
"""
code_id_dict = code_id_map_em()
adjust_map = {
"": "0",
"qfq": "1",
"hfq": "2",
}
if period == "1":
url = "https://push2his.eastmoney.com/api/qt/stock/trends2/get"
params = {
"fields1": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58",
"ut": "7eea3edcaed734bea9cbfc24409ed989",
"ndays": "5",
"iscr": "0",
"secid": f"{code_id_dict[symbol]}.{symbol}",
"_": "1623766962675",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(
[item.split(",") for item in data_json["data"]["trends"]]
)
temp_df.columns = [
"时间",
"开盘",
"收盘",
"最高",
"最低",
"成交量",
"成交额",
"最新价",
]
temp_df.index = pd.to_datetime(temp_df["时间"])
temp_df = temp_df[start_date:end_date]
temp_df.reset_index(drop=True, inplace=True)
temp_df["开盘"] = pd.to_numeric(temp_df["开盘"])
temp_df["收盘"] = pd.to_numeric(temp_df["收盘"])
temp_df["最高"] = pd.to_numeric(temp_df["最高"])
temp_df["最低"] = pd.to_numeric(temp_df["最低"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"])
temp_df["时间"] = pd.to_datetime(temp_df["时间"]).astype(str)
return temp_df
else:
url = "http://push2his.eastmoney.com/api/qt/stock/kline/get"
params = {
"fields1": "f1,f2,f3,f4,f5,f6",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58,f59,f60,f61",
"ut": "7eea3edcaed734bea9cbfc24409ed989",
"klt": period,
"fqt": adjust_map[adjust],
"secid": f"{code_id_dict[symbol]}.{symbol}",
"beg": "0",
"end": "20500000",
"_": "1630930917857",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(
[item.split(",") for item in data_json["data"]["klines"]]
)
temp_df.columns = [
"时间",
"开盘",
"收盘",
"最高",
"最低",
"成交量",
"成交额",
"振幅",
"涨跌幅",
"涨跌额",
"换手率",
]
temp_df.index = pd.to_datetime(temp_df["时间"])
temp_df = temp_df[start_date:end_date]
temp_df.reset_index(drop=True, inplace=True)
temp_df["开盘"] = pd.to_numeric(temp_df["开盘"])
temp_df["收盘"] = pd.to_numeric(temp_df["收盘"])
temp_df["最高"] = pd.to_numeric(temp_df["最高"])
temp_df["最低"] = pd.to_numeric(temp_df["最低"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["振幅"] = pd.to_numeric(temp_df["振幅"])
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"])
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"])
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"])
temp_df["时间"] = pd.to_datetime(temp_df["时间"]).astype(str)
temp_df = temp_df[
[
"时间",
"开盘",
"收盘",
"最高",
"最低",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"换手率",
]
]
return temp_df
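# Added usage example (a sketch; values are illustrative): 15-minute back-adjusted bars.
#   df_min = stock_zh_a_hist_min_em(symbol="000001", period="15", adjust="hfq",
#                                   start_date="2023-06-01 09:30:00",
#                                   end_date="2023-06-30 15:00:00")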
def stock_zh_a_hist_pre_min_em(
symbol: str = "000001",
start_time: str = "09:00:00",
end_time: str = "15:50:00",
) -> pd.DataFrame:
"""
    Eastmoney - quote homepage - Shanghai/Shenzhen/Beijing A shares - intraday data including the pre-market session
    http://quote.eastmoney.com/concept/sh603777.html?from=classic
    :param symbol: stock code
    :type symbol: str
    :param start_time: start time
    :type start_time: str
    :param end_time: end time
    :type end_time: str
    :return: intraday data including the pre-market session
    :rtype: pandas.DataFrame
"""
code_id_dict = code_id_map_em()
url = "https://push2.eastmoney.com/api/qt/stock/trends2/get"
params = {
"fields1": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58",
"ut": "fa5fd1943c7b386f172d6893dbfba10b",
"ndays": "1",
"iscr": "1",
"iscca": "0",
"secid": f"{code_id_dict[symbol]}.{symbol}",
"_": "1623766962675",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(
[item.split(",") for item in data_json["data"]["trends"]]
)
temp_df.columns = [
"时间",
"开盘",
"收盘",
"最高",
"最低",
"成交量",
"成交额",
"最新价",
]
temp_df.index = pd.to_datetime(temp_df["时间"])
date_format = temp_df.index[0].date().isoformat()
temp_df = temp_df[
date_format + " " + start_time : date_format + " " + end_time
]
temp_df.reset_index(drop=True, inplace=True)
temp_df["开盘"] = pd.to_numeric(temp_df["开盘"])
temp_df["收盘"] = pd.to_numeric(temp_df["收盘"])
temp_df["最高"] = pd.to_numeric(temp_df["最高"])
temp_df["最低"] = pd.to_numeric(temp_df["最低"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"])
temp_df["时间"] = pd.to_datetime(temp_df["时间"]).astype(str)
return temp_df
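# Added usage example (a sketch; values are illustrative): 1-minute data including
# the pre-market session for the latest trading day.
#   df_pre = stock_zh_a_hist_pre_min_em(symbol="000001",
#                                       start_time="09:00:00", end_time="15:00:00")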
def stock_hk_spot_em() -> pd.DataFrame:
"""
    Eastmoney - Hong Kong stocks - real-time quotes
    http://quote.eastmoney.com/center/gridlist.html#hk_stocks
    :return: Hong Kong real-time quotes
:rtype: pandas.DataFrame
"""
url = "http://72.push2.eastmoney.com/api/qt/clist/get"
params = {
"pn": "1",
"pz": "5000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:128 t:3,m:128 t:4,m:128 t:1,m:128 t:2",
"fields": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f12,f13,f14,f15,f16,f17,f18,f20,f21,f23,f24,f25,f22,f11,f62,f128,f136,f115,f152",
"_": "1624010056945",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df.columns = [
"_",
"最新价",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"换手率",
"市盈率-动态",
"量比",
"_",
"代码",
"_",
"名称",
"最高",
"最低",
"今开",
"昨收",
"_",
"_",
"_",
"市净率",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
]
temp_df.reset_index(inplace=True)
temp_df["index"] = temp_df.index + 1
temp_df.rename(columns={"index": "序号"}, inplace=True)
temp_df = temp_df[
[
"序号",
"代码",
"名称",
"最新价",
"涨跌额",
"涨跌幅",
"今开",
"最高",
"最低",
"昨收",
"成交量",
"成交额",
]
]
temp_df["序号"] = pd.to_numeric(temp_df["序号"])
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"], errors="coerce")
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"], errors="coerce")
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"], errors="coerce")
temp_df["今开"] = pd.to_numeric(temp_df["今开"], errors="coerce")
temp_df["最高"] = pd.to_numeric(temp_df["最高"], errors="coerce")
temp_df["最低"] = pd.to_numeric(temp_df["最低"], errors="coerce")
temp_df["昨收"] = pd.to_numeric(temp_df["昨收"], errors="coerce")
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"], errors="coerce")
temp_df["成交额"] = pd.to_ | numeric(temp_df["成交额"], errors="coerce") | pandas.to_numeric |
import pandas as pd
import os
import numpy as np
def main():
path = "C:/path/to/folder/"
path_laptop = ""
export_csv(path_to_folder=path)
# export_csv(path_to_folder=path_laptop)
def import_df(path_to_folder):
csv_list = extract_files_to_list(path_to_folder, path_bool=True)
csv_name_list = extract_files_to_list(path_to_folder, path_bool=False)
# list of csv files which will be processed
print(csv_list)
# initialising the list of formatted tables
dataframe_list = []
name_list = []
for i, csv in enumerate(csv_list):
data = pd.read_csv(csv, sep=",")
# print(data)
dataframe_list.append(data)
name_list.append(csv_name_list[i])
return name_list, dataframe_list
def format_df(path_to_folder):
name_list, dataframe_list = import_df(path_to_folder)
# initialising the list of processed tables
data_finished = []
t = 0
for data in dataframe_list:
print(name_list[t])
t += 1
# calculate n
n = int((len(data.columns) - 1) / 2 / 24)
# delete first column with condition names (contains umlauts)
data = data.drop(data.columns[0], axis=1)
data = data.rename(index={0: "barr", 1: "Goe", 2: "dFLR"})
print(data)
# exclude values which were excluded in prism and therefore in bargraph
# excluded values are marked with *
# to do this, transform pandas data frame to numpy array
data = data.to_numpy()
# for row in range(0, 6):
# for i, value in enumerate(data[row]):
# try:
# data[row, i] = pd.to_numeric(value)
# except ValueError:
# data[row, i] = np.nan
# preparation of column and rownames for retransform numpy array to pandas data frame
header = ["baseline", "stimulated"]
column_names = []
for i in header:
if len(data[1]) < (2*256):
for j in range(n * 24):
column_names += [i]
else:
for j in range(256):
column_names += [i]
# retransform numpy array to pandas data frame
data = pd.DataFrame(data=data, index=["barr", "Goe", "dFLR"], columns=column_names)
print(data)
# calculate mean of each triplicate
if len(column_names) < (2*256):
# mean of baselines (24x technical replicates per n)
mean_baseline_data = pd.concat([data.iloc[:, i:i + 24].mean(axis=1) for i in range(0, n * 24, 24)], axis=1)
# mean of stimulates data (3x technical replicates per n)
mean_stimulated_data = pd.concat([data.iloc[:, i:i + 3].mean(axis=1) for i in range(n * 24, n * 24 + n * 3, 3)],
axis=1)
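            # Added note: the iloc slices step through the raw columns in blocks
            # (24 baseline measurements, then 3 stimulated measurements per n,
            # as described above) and average each block of technical replicates.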
if len(column_names) == (2*256):
mean_baseline_data = pd.concat([data.iloc[:, i:i + 24].mean(axis=1) for i in range(0, 9*24, 24)], axis=1)
mean_baseline_data["9"] = [np.mean(data.values[0][239:256]), "NaN", "NaN"]
mean_stimulated_data = pd.concat([data.iloc[:, i:i + 3].mean(axis=1) for i in range(256, 256 + 9 * 3, 3)],
axis=1)
mean_stimulated_data["9"] = [np.mean(data.values[0][256:289]), "NaN", "NaN"]
print("mean baseline")
print(mean_baseline_data)
print("mean stim")
print(mean_stimulated_data)
# create column for id
id = []
for j in range(len(header)):
for i in range(1, (len(data.index) * n) + 1):
temp = str(i)
id += [temp]
# create column for condition
condition = []
for j in range(len(header)):
for i in data.index:
for r in range(n):
condition += [i]
# create column for state (base or stim)
state = []
for j in header:
for i in range(n * len(data.index)):
state += [j]
# create column for BRET ratio
BRET = []
# for loop extracts BRET ratios row wise and saves them in array
# baseline BRET ratios
for row in range(len(data.index)):
for value in mean_baseline_data.iloc[row]:
BRET += [value]
# append stimulated BRET ratios
for row in range(len(data.index)):
for value in mean_stimulated_data.iloc[row]:
BRET += [value]
        mean_data = pd.DataFrame(id)
'''
MIT License
Copyright (c) 2020 Minciencia
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import requests
import utils
import pandas as pd
import datetime as dt
import numpy as np
from itertools import groupby
import time
class vacunacion:
def __init__(self,output,indicador):
self.output = output
self.indicador = indicador
self.my_files = {
'vacunacion_fabricante':
'https://raw.githubusercontent.com/IgnacioAcunaF/covid19-vaccination/master/output/chile-vaccination-type.csv',
'vacunacion_region':
'https://raw.githubusercontent.com/IgnacioAcunaF/covid19-vaccination/master/output/chile-vaccination.csv',
'vacunacion_edad':
'https://github.com/IgnacioAcunaF/covid19-vaccination/raw/master/output/chile-vaccination-ages.csv',
'vacunacion_grupo':
'https://github.com/IgnacioAcunaF/covid19-vaccination/raw/master/output/chile-vaccination-groups.csv',
}
self.path = '../input/Vacunacion'
def get_last(self):
        ## download the file corresponding to the requested indicator
if self.indicador == 'fabricante':
print('Retrieving files')
print('vacunacion_fabricante')
r = requests.get(self.my_files['vacunacion_fabricante'])
content = r.content
csv_file = open(self.path + '/' + 'vacunacion_fabricante' + '.csv', 'wb')
csv_file.write(content)
csv_file.close()
elif self.indicador == 'campana':
print('Retrieving files')
print('vacunacion_region')
r = requests.get(self.my_files['vacunacion_region'])
content = r.content
csv_file = open(self.path + '/' + 'vacunacion_region' + '.csv', 'wb')
csv_file.write(content)
csv_file.close()
elif self.indicador == 'edad':
print('Retrieving files')
print('vacunacion_edad')
r = requests.get(self.my_files['vacunacion_edad'])
content = r.content
csv_file = open(self.path + '/' + 'vacunacion_edad' + '.csv', 'wb')
csv_file.write(content)
csv_file.close()
elif self.indicador == 'caracteristicas_del_vacunado':
print('Retrieving files')
print('vacunacion_grupo')
r = requests.get(self.my_files['vacunacion_grupo'])
content = r.content
csv_file = open(self.path + '/' + 'vacunacion_grupo' + '.csv', 'wb')
csv_file.write(content)
csv_file.close()
        ## read the file corresponding to the requested indicator
if self.indicador == 'fabricante':
print('reading files')
print('vacunacion_fabricante')
self.last_added = pd.read_csv('../input/Vacunacion/vacunacion_fabricante.csv')
elif self.indicador == 'campana':
print('reading files')
print('vacunacion_region')
self.last_added = pd.read_csv('../input/Vacunacion/vacunacion_region.csv')
elif self.indicador == 'edad':
print('reading files')
print('vacunacion_edad')
self.last_added = pd.read_csv('../input/Vacunacion/vacunacion_edad.csv')
elif self.indicador == 'caracteristicas_del_vacunado':
print('reading files')
print('vacunacion_grupo')
self.last_added = pd.read_csv('../input/Vacunacion/vacunacion_grupo.csv')
elif self.indicador == 'vacunas_region':
print('reading files')
print('vacunacion por region por dia')
aux = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_1.csv', sep=';', encoding='ISO-8859-1')
aux_2 = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_1_2.csv', sep=';', encoding='ISO-8859-1')
self.last_added = pd.concat([aux, aux_2], ignore_index=True)
elif self.indicador == 'vacunas_comuna':
print('reading files')
print('vacunacion por comuna por dia')
aux = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_1.csv', sep=';', encoding='ISO-8859-1')
aux_2 = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_1_2.csv', sep=';', encoding='ISO-8859-1')
self.last_added = pd.concat([aux, aux_2], ignore_index=True)
elif self.indicador == 'vacunas_edad_region':
print('reading files')
print('vacunacion por region por edad')
aux = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_2.csv', sep=';', encoding='ISO-8859-1')
aux_2 = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_2_2.csv', sep=';', encoding='ISO-8859-1')
self.last_added = pd.concat([aux, aux_2], ignore_index=True)
elif self.indicador == 'vacunas_edad_sexo':
print('reading files')
print('vacunacion por sexo por edad')
aux = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_3.csv', sep=';', encoding='ISO-8859-1')
aux_2 = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_3_2.csv', sep=';', encoding='ISO-8859-1')
self.last_added = pd.concat([aux, aux_2], ignore_index=True)
print('vacunacion por sexo por edad y FECHA')
aux = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_6.csv', sep=';', encoding='ISO-8859-1')
aux_2 = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_6_2.csv', sep=';', encoding='ISO-8859-1')
self.last_edad_fecha = pd.concat([aux, aux_2], ignore_index=True)
elif self.indicador == 'vacunas_prioridad':
print('reading files')
print('vacunacion por grupos prioritarios')
self.last_added = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_8.csv', sep=';', encoding='ISO-8859-1')
# aux_2 = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_8_2.csv', sep=';', encoding='ISO-8859-1')
# self.last_added = pd.concat([aux, aux_2], ignore_index=True)
elif self.indicador == 'vacunas_comuna_edad':
print('reading files')
print('vacunacion por comuna por edad')
aux = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_2.csv', sep=';', encoding='ISO-8859-1')
aux_2 = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_2_2.csv', sep=';', encoding='ISO-8859-1')
self.last_added = pd.concat([aux, aux_2], ignore_index=True)
elif self.indicador == 'vacunas_establecimiento':
print('reading files')
print('vacunacion por establecimiento')
aux = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_7.csv', sep=';', encoding='ISO-8859-1')
aux_2 = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_7_2.csv', sep=';', encoding='ISO-8859-1')
self.last_added = pd.concat([aux, aux_2], ignore_index=True)
elif self.indicador == 'vacunas_fabricante':
print('reading files')
print('vacunacion por fabricante y fecha')
aux = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_7.csv', sep=';', encoding='ISO-8859-1')
aux_2 = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_7_2.csv', sep=';', encoding='ISO-8859-1')
self.last_added = pd.concat([aux, aux_2], ignore_index=True)
elif self.indicador == 'vacunas_fabricante_edad':
print('reading files')
print('vacunacion por fabricante y edad')
aux = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_9.csv', sep=';', encoding='ISO-8859-1')
aux_2 = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_9_2.csv', sep=';', encoding='ISO-8859-1')
self.last_added = pd.concat([aux, aux_2], ignore_index=True)
def last_to_csv(self):
if self.indicador == 'fabricante':
            ## campaign by manufacturer
self.last_added.rename(columns={'Dose': 'Dosis'}, inplace=True)
self.last_added.rename(columns={'Type': 'Fabricante'}, inplace=True)
self.last_added["Dosis"] = self.last_added["Dosis"].replace({"First": "Primera",
"Second": "Segunda",
"Third": "Tercera",
"Fourth": "Cuarta",
"Unique": "Unica"
})
identifiers = ['Fabricante', 'Dosis']
variables = [x for x in self.last_added.columns if x not in identifiers]
self.last_added = self.last_added[identifiers + variables]
self.last_added.to_csv(self.output + '.csv', index=False)
df_t = self.last_added.T
df_t.to_csv(self.output + '_t.csv', header=False)
df_std = pd.melt(self.last_added, id_vars=identifiers, value_vars=variables, var_name=['Fecha'],
value_name='Cantidad')
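            # Added note: pd.melt reshapes the wide table (one column per date) into a
            # long table with one row per (Fabricante, Dosis, Fecha) combination, which
            # is the "_std" layout written below.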
df_std.to_csv(self.output + '_std.csv', index=False)
elif self.indicador == 'campana':
            ## campaign by region
self.last_added.rename(columns={'Dose': 'Dosis'}, inplace=True)
utils.regionName(self.last_added)
self.last_added["Dosis"] = self.last_added["Dosis"].replace({"First": "Primera",
"Second": "Segunda",
"Third": "Tercera",
"Fourth": "Cuarta",
"Unique": "Unica"
})
identifiers = ['Region', 'Dosis']
variables = [x for x in self.last_added.columns if x not in identifiers]
self.last_added = self.last_added[identifiers + variables]
self.last_added.to_csv(self.output + '.csv', index=False)
df_t = self.last_added.T
df_t.to_csv(self.output + '_t.csv', header=False)
df_std = pd.melt(self.last_added, id_vars=identifiers, value_vars=variables, var_name=['Fecha'],
value_name='Cantidad')
df_std.to_csv(self.output + '_std.csv', index=False)
elif self.indicador == 'edad':
            ## campaign by age group
self.last_added.rename(columns={'Dose': 'Dosis',
'Age':'Rango_etario'}, inplace=True)
self.last_added["Dosis"] = self.last_added["Dosis"].replace({"First": "Primera",
"Second": "Segunda"
})
identifiers = ['Rango_etario', 'Dosis']
variables = [x for x in self.last_added.columns if x not in identifiers]
self.last_added = self.last_added[identifiers + variables]
self.last_added.to_csv(self.output + '.csv', index=False)
df_t = self.last_added.T
df_t.to_csv(self.output + '_t.csv', header=False)
df_std = pd.melt(self.last_added, id_vars=identifiers, value_vars=variables, var_name=['Fecha'],
value_name='Cantidad')
df_std.to_csv(self.output + '_std.csv', index=False)
elif self.indicador == 'caracteristicas_del_vacunado':
            ## campaign by characteristics of the vaccinated person
self.last_added.rename(columns={'Dose': 'Dosis',
'Group':'Grupo'}, inplace=True)
self.last_added["Dosis"] = self.last_added["Dosis"].replace({"First": "Primera",
"Second": "Segunda"
})
identifiers = ['Grupo', 'Dosis']
variables = [x for x in self.last_added.columns if x not in identifiers]
self.last_added = self.last_added[identifiers + variables]
self.last_added.to_csv(self.output + '.csv', index=False)
df_t = self.last_added.T
df_t.to_csv(self.output + '_t.csv', header=False)
df_std = pd.melt(self.last_added, id_vars=identifiers, value_vars=variables, var_name=['Fecha'],
value_name='Cantidad')
df_std.to_csv(self.output + '_std.csv', index=False)
elif self.indicador == 'vacunas_region':
self.last_added.rename(columns={'REGION_CORTO': 'Region',
'COD_COMUNA_FINAL': 'Comuna',
'FECHA_INMUNIZACION': 'Fecha',
'SUM_of_SUM_of_2aDOSIS': 'Segunda_comuna',
'SUM_of_SUM_of_1aDOSIS': 'Primera_comuna',
'SUM_of_SUM_of_ÚnicaDOSIS':'Unica_comuna',
'SUM_of_4_Dosis':'Cuarta_comuna',
'SUM_of_Refuerzo_DOSIS':'Refuerzo_comuna'}, inplace=True)
self.last_added = self.last_added.dropna(subset=['Fecha'])
            self.last_added['Fecha'] = pd.to_datetime(self.last_added['Fecha'], format='%d/%m/%Y')
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Index, Series
import pandas._testing as tm
class TestDataFrameAlign:
def test_align_float(self, float_frame):
af, bf = float_frame.align(float_frame)
assert af._mgr is not float_frame._mgr
af, bf = float_frame.align(float_frame, copy=False)
assert af._mgr is float_frame._mgr
# axis = 0
other = float_frame.iloc[:-5, :3]
af, bf = float_frame.align(other, axis=0, fill_value=-1)
tm.assert_index_equal(bf.columns, other.columns)
# test fill value
join_idx = float_frame.index.join(other.index)
diff_a = float_frame.index.difference(join_idx)
diff_b = other.index.difference(join_idx)
diff_a_vals = af.reindex(diff_a).values
diff_b_vals = bf.reindex(diff_b).values
assert (diff_a_vals == -1).all()
af, bf = float_frame.align(other, join="right", axis=0)
tm.assert_index_equal(bf.columns, other.columns)
tm.assert_index_equal(bf.index, other.index)
tm.assert_index_equal(af.index, other.index)
# axis = 1
other = float_frame.iloc[:-5, :3].copy()
af, bf = float_frame.align(other, axis=1)
tm.assert_index_equal(bf.columns, float_frame.columns)
tm.assert_index_equal(bf.index, other.index)
# test fill value
join_idx = float_frame.index.join(other.index)
diff_a = float_frame.index.difference(join_idx)
diff_b = other.index.difference(join_idx)
diff_a_vals = af.reindex(diff_a).values
# TODO(wesm): unused?
diff_b_vals = bf.reindex(diff_b).values # noqa
assert (diff_a_vals == -1).all()
af, bf = float_frame.align(other, join="inner", axis=1)
tm.assert_index_equal(bf.columns, other.columns)
af, bf = float_frame.align(other, join="inner", axis=1, method="pad")
tm.assert_index_equal(bf.columns, other.columns)
af, bf = float_frame.align(
other.iloc[:, 0], join="inner", axis=1, method=None, fill_value=None
)
tm.assert_index_equal(bf.index, Index([]))
af, bf = float_frame.align(
other.iloc[:, 0], join="inner", axis=1, method=None, fill_value=0
)
tm.assert_index_equal(bf.index, Index([]))
# Try to align DataFrame to Series along bad axis
msg = "No axis named 2 for object type DataFrame"
with pytest.raises(ValueError, match=msg):
float_frame.align(af.iloc[0, :3], join="inner", axis=2)
# align dataframe to series with broadcast or not
idx = float_frame.index
s = Series(range(len(idx)), index=idx)
left, right = float_frame.align(s, axis=0)
tm.assert_index_equal(left.index, float_frame.index)
tm.assert_index_equal(right.index, float_frame.index)
assert isinstance(right, Series)
left, right = float_frame.align(s, broadcast_axis=1)
tm.assert_index_equal(left.index, float_frame.index)
expected = {c: s for c in float_frame.columns}
expected = DataFrame(
expected, index=float_frame.index, columns=float_frame.columns
)
tm.assert_frame_equal(right, expected)
# see gh-9558
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
result = df[df["a"] == 2]
expected = DataFrame([[2, 5]], index=[1], columns=["a", "b"])
tm.assert_frame_equal(result, expected)
result = df.where(df["a"] == 2, 0)
expected = DataFrame({"a": [0, 2, 0], "b": [0, 5, 0]})
tm.assert_frame_equal(result, expected)
def test_align_int(self, int_frame):
# test other non-float types
other = DataFrame(index=range(5), columns=["A", "B", "C"])
af, bf = int_frame.align(other, join="inner", axis=1, method="pad")
tm.assert_index_equal(bf.columns, other.columns)
def test_align_mixed_type(self, float_string_frame):
af, bf = float_string_frame.align(
float_string_frame, join="inner", axis=1, method="pad"
)
tm.assert_index_equal(bf.columns, float_string_frame.columns)
def test_align_mixed_float(self, mixed_float_frame):
# mixed floats/ints
other = DataFrame(index=range(5), columns=["A", "B", "C"])
af, bf = mixed_float_frame.align(
other.iloc[:, 0], join="inner", axis=1, method=None, fill_value=0
)
tm.assert_index_equal(bf.index, Index([]))
def test_align_mixed_int(self, mixed_int_frame):
other = DataFrame(index=range(5), columns=["A", "B", "C"])
af, bf = mixed_int_frame.align(
other.iloc[:, 0], join="inner", axis=1, method=None, fill_value=0
)
tm.assert_index_equal(bf.index, Index([]))
def test_align_multiindex(self):
# GH#10665
# same test cases as test_align_multiindex in test_series.py
midx = pd.MultiIndex.from_product(
[range(2), range(3), range(2)], names=("a", "b", "c")
)
idx = pd.Index(range(2), name="b")
df1 = pd.DataFrame(np.arange(12, dtype="int64"), index=midx)
df2 = pd.DataFrame(np.arange(2, dtype="int64"), index=idx)
# these must be the same results (but flipped)
res1l, res1r = df1.align(df2, join="left")
res2l, res2r = df2.align(df1, join="right")
expl = df1
tm.assert_frame_equal(expl, res1l)
tm.assert_frame_equal(expl, res2r)
expr = pd.DataFrame([0, 0, 1, 1, np.nan, np.nan] * 2, index=midx)
tm.assert_frame_equal(expr, res1r)
tm.assert_frame_equal(expr, res2l)
res1l, res1r = df1.align(df2, join="right")
res2l, res2r = df2.align(df1, join="left")
exp_idx = pd.MultiIndex.from_product(
[range(2), range(2), range(2)], names=("a", "b", "c")
)
expl = pd.DataFrame([0, 1, 2, 3, 6, 7, 8, 9], index=exp_idx)
tm.assert_frame_equal(expl, res1l)
tm.assert_frame_equal(expl, res2r)
expr = pd.DataFrame([0, 0, 1, 1] * 2, index=exp_idx)
tm.assert_frame_equal(expr, res1r)
tm.assert_frame_equal(expr, res2l)
def test_align_series_combinations(self):
df = pd.DataFrame({"a": [1, 3, 5], "b": [1, 3, 5]}, index=list("ACE"))
s = pd.Series([1, 2, 4], index=list("ABD"), name="x")
# frame + series
res1, res2 = df.align(s, axis=0)
exp1 = pd.DataFrame(
{"a": [1, np.nan, 3, np.nan, 5], "b": [1, np.nan, 3, np.nan, 5]},
index=list("ABCDE"),
)
exp2 = pd.Series([1, 2, np.nan, 4, np.nan], index=list("ABCDE"), name="x")
tm.assert_frame_equal(res1, exp1)
tm.assert_series_equal(res2, exp2)
# series + frame
res1, res2 = s.align(df)
tm.assert_series_equal(res1, exp2)
        tm.assert_frame_equal(res2, exp1)
# LIBRARIES
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import scikitplot as skplt
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
from scipy import stats
from sklearn import metrics
from sklearn.preprocessing import scale
# machine learning
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
# READ FILES
data_train = pd.read_csv('C:/Users/agus_/Downloads/train.csv')
data_test = pd.read_csv('C:/Users/agus_/Downloads/test.csv')
# Information about the full dataset
print(data_train.info())
print("-"*40)
print(data_test.info())
print("-"*67)
print(data_train.describe())
print("\n")
# Original features of the dataset
print(data_train.columns.values)
print("-"*35)
print(data_test.columns.values)
print("\n")
# DATA ANALYSIS STAGES - FEATURE ENGINEERING
# We analyze the features we consider necessary for inclusion in the model, following a
# series of steps to decide which features are relevant and which are not.
# 1) Feature correlation
# In this stage we analyze the features we believe are correlated with Survived. This is done only
# for features without missing values. Features showing a strong correlation are included in the model.
print(data_train[['Pclass', 'Survived']].groupby(['Pclass'], as_index=False).mean().sort_values(by='Survived', ascending=False))
print("\n")
grid = sns.factorplot(x="Pclass", y="Survived", data=data_train, kind="bar", size=6 , palette="muted")
grid.despine(left=True)
grid = grid.set_ylabels("survival probability")
plt.show()
print(data_train[['Sex', 'Survived']].groupby(['Sex'], as_index=False).mean().sort_values(by='Survived', ascending=False))
print("\n")
grid = sns.factorplot(x="Sex", y="Survived", data=data_train,kind="bar", size=6 , palette="muted")
grid.despine(left=True)
grid = grid.set_ylabels("survival probability")
plt.show()
print(data_train[['SibSp', 'Survived']].groupby(['SibSp'], as_index=False).mean().sort_values(by='Survived', ascending=False))
print("\n")
grid = sns.factorplot(x="SibSp", y="Survived", data=data_train, kind="bar", size=6 , palette="muted")
grid.despine(left=True)
grid = grid.set_ylabels("survival probability")
plt.show()
print(data_train[['Parch', 'Survived']].groupby(['Parch'], as_index=False).mean().sort_values(by='Survived', ascending=False))
print("\n")
grid = sns.factorplot(x="Parch", y="Survived", data=data_train, kind="bar", size=6 , palette="muted")
grid.despine(left=True)
grid = grid.set_ylabels("survival probability")
plt.show()
print(data_train[['Embarked', 'Survived']].groupby(['Embarked'], as_index=False).mean().sort_values(by='Survived', ascending=False))
print("\n")
grid = sns.factorplot(x="Embarked", y="Survived", data=data_train, size=6, kind="bar", palette="muted")
grid.despine(left=True)
grid = grid.set_ylabels("survival probability")
plt.show()
# sns.set(style="darkgrid")
grid = sns.FacetGrid(data_train, col='Survived')
grid = grid.map(sns.distplot, 'Age', hist=True, hist_kws=dict(edgecolor="w"), color='blue')
plt.show()
# 2) Feature correction
# In this stage we drop the features considered completely irrelevant to the model.
# How do we identify them? Simple: we look for features that are independent and provide no information
# about whether the person survived or not. In this case they are PassengerId, Ticket and Cabin.
data_train = data_train.drop(['PassengerId', 'Ticket', 'Cabin'], axis=1)
data_test = data_test.drop(['Ticket', 'Cabin'], axis=1)
print(data_train.columns.values)
print(data_train.shape)
print("\n")
print(data_test.columns.values)
print(data_test.shape)
print("\n")
# 3) Feature creation
# In this stage we analyze features that on their own make the model more complex, but which,
# grouped into a new feature, simplify the model and make it easier to understand.
# We evaluate whether it is worth creating a new feature from the existing ones.
dataset = [data_train, data_test]
for data in dataset:
data['Title'] = data.Name.str.extract('([A-Za-z]+)\.', expand=False)
for data in dataset:
data['Title'] = data['Title'].replace(['Lady', 'Countess', 'Capt', 'Col', 'Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Other')
data['Title'] = data['Title'].replace('Mlle', 'Miss')
data['Title'] = data['Title'].replace('Ms', 'Miss')
data['Title'] = data['Title'].replace('Mme', 'Mrs')
print(data_train[['Title', 'Survived']].groupby(['Title'], as_index=False).mean().sort_values(by='Survived', ascending=False))
print("\n")
grid = sns.factorplot(x="Title", y="Survived", data=data_train, kind="bar")
grid = grid.set_xticklabels(["Master","Miss", "Mrs","Mr","Rare"])
grid = grid.set_ylabels("survival probability")
plt.show()
transformacion_de_titulos = {"Master": 1, "Miss": 2, "Mrs": 3, "Mr": 4, "Other": 5}
for data in dataset:
data['Title'] = data['Title'].map(transformacion_de_titulos)
    data['Title'] = data['Title'].fillna(value=0)  # fillna() ---> finds every NaN value and replaces it with 0
print()
data_train = data_train.drop(['Name'], axis=1)
data_test = data_test.drop(['Name'], axis=1)
dataset = [data_train, data_test]
# Sex dummies
data_train = pd.get_dummies(data=data_train, columns=['Sex'])
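# Added note: pd.get_dummies replaces the categorical 'Sex' column with one-hot
# indicator columns (e.g. 'Sex_female' and 'Sex_male').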
#!/usr/bin/env python
"""Script for generating figures of catalog statistics. Run `QCreport.py -h`
for command line usage.
"""
import os
import sys
import errno
import argparse
from datetime import date, datetime
from math import sqrt, radians, cos
import markdown
import numpy as np
import pandas as pd
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.patches import Polygon
from obspy.geodetics.base import gps2dist_azimuth
# Python 2
try:
from urllib2 import urlopen, HTTPError
# Python 3
except ImportError:
from urllib.request import urlopen, HTTPError
import QCutils as qcu
from decorators import retry, printstatus
###############################################################################
###############################################################################
###############################################################################
@printstatus('Creating basic catalog summary')
def basic_cat_sum(catalog, dirname, dup1, dup2, timewindow, distwindow):
"""Gather basic catalog summary statistics."""
lines = []
lines.append('Catalog name: %s\n\n' % dirname[:-9].upper())
lines.append('First date in catalog: %s\n' % catalog['time'].min())
lines.append('Last date in catalog: %s\n\n' % catalog['time'].max())
lines.append('Total number of events: %s\n\n' % len(catalog))
lines.append('Minimum latitude: %s\n' % catalog['latitude'].min())
lines.append('Maximum latitude: %s\n' % catalog['latitude'].max())
lines.append('Minimum longitude: %s\n' % catalog['longitude'].min())
lines.append('Maximum longitude: %s\n\n' % catalog['longitude'].max())
lines.append('Minimum depth: %s\n' % catalog['depth'].min())
lines.append('Maximum depth: %s\n' % catalog['depth'].max())
lines.append('Number of 0 km depth events: %s\n'
% len(catalog[catalog['depth'] == 0]))
lines.append('Number of NaN depth events: %s\n\n'
% len(catalog[pd.isnull(catalog['depth'])]))
lines.append('Minimum magnitude: %s\n' % catalog['mag'].min())
lines.append('Maximum magnitude: %s\n' % catalog['mag'].max())
lines.append('Number of 0 magnitude events: %s\n'
% len(catalog[catalog['mag'] == 0]))
lines.append('Number of NaN magnitude events: %s\n\n'
% len(catalog[pd.isnull(catalog['mag'])]))
lines.append('Number of possible duplicates (%ss and %skm threshold): %d\n'
% (timewindow, distwindow, dup1))
lines.append('Number of possible duplicates (16s and 100km threshold): %d'
% dup2)
with open('%s_catalogsummary.txt' % dirname, 'w') as sumfile:
for line in lines:
sumfile.write(line)
def largest_ten(catalog, dirname):
"""Make a list of the 10 events with largest magnitude."""
catalog = catalog.sort_values(by='mag', ascending=False)
topten = catalog.head(n=10)
topten = topten[['time', 'id', 'latitude', 'longitude', 'depth', 'mag']]
with open('%s_largestten.txt' % dirname, 'w') as magfile:
for event in topten.itertuples():
line = ' '.join([str(x) for x in event[1:]]) + '\n'
magfile.write(line)
@printstatus('Finding possible duplicates')
def list_duplicates(catalog, dirname, timewindow=2, distwindow=15,
magwindow=None, minmag=-5, locfilter=None):
"""Make a list of possible duplicate events."""
catalog.loc[:, 'convtime'] = [' '.join(x.split('T'))
for x in catalog['time'].tolist()]
catalog.loc[:, 'convtime'] = catalog['convtime'].astype('datetime64[ns]')
catalog = catalog[catalog['mag'] >= minmag]
if locfilter:
catalog = catalog[catalog['place'].str.contains(locfilter, na=False)]
cat = catalog[['time', 'convtime', 'id', 'latitude', 'longitude', 'depth',
'mag']].copy()
cat.loc[:, 'time'] = [qcu.to_epoch(x) for x in cat['time']]
duplines1 = [('Possible duplicates using %ss time threshold and %skm '
'distance threshold\n') % (timewindow, distwindow),
'***********************\n'
'date time id latitude longitude depth magnitude '
'(distance) (Δ time) (Δ magnitude)\n']
duplines2 = [('\n\nPossible duplicates using 16s time threshold and 100km '
'distance threshold\n'),
'***********************\n'
'date time id latitude longitude depth magnitude '
'(distance) (Δ time) (Δ magnitude)\n']
sep = '-----------------------\n'
thresh1dupes, thresh2dupes = 0, 0
for event in cat.itertuples():
trimdf = cat[cat['convtime'].between(event.convtime, event.convtime
+ pd.Timedelta(seconds=16), inclusive=False)]
if len(trimdf) != 0:
for tevent in trimdf.itertuples():
dist = gps2dist_azimuth(event.latitude, event.longitude,
tevent.latitude, tevent.longitude)[0] / 1000.
if dist < 100:
dtime = (event.convtime - tevent.convtime).total_seconds()
dmag = event.mag - tevent.mag
diffs = map('{:.2f}'.format, [dist, dtime, dmag])
dupline1 = ' '.join([str(x) for x in event[1:]]) + ' ' +\
' '.join(diffs) + '\n'
dupline2 = ' '.join([str(x) for x in tevent[1:]]) + '\n'
duplines2.extend((sep, dupline1, dupline2))
thresh2dupes += 1
if (dist < distwindow) and (abs(dtime) < timewindow):
duplines1.extend((sep, dupline1, dupline2))
thresh1dupes += 1
continue
with open('%s_duplicates.txt' % dirname, 'w') as dupfile:
for dupline in duplines1:
dupfile.write(dupline)
for dupline in duplines2:
dupfile.write(dupline)
return thresh1dupes, thresh2dupes
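# Added usage example (a sketch; the directory name is hypothetical):
#   dup1, dup2 = list_duplicates(catalog, 'examplecat2010-2015', timewindow=2, distwindow=15)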
@printstatus('Mapping earthquake locations')
def map_detecs(catalog, dirname, minmag=-5, mindep=-50, title=''):
"""Make scatter plot of detections with magnitudes (if applicable)."""
catalog = catalog[(catalog['mag'] >= minmag)
& (catalog['depth'] >= mindep)].copy()
if len(catalog) == 0:
print('\nCatalog contains no events deeper than %s.' % mindep)
return
# define map bounds
lllat, lllon, urlat, urlon, _, _, _, clon = qcu.get_map_bounds(catalog)
plt.figure(figsize=(12, 7))
mplmap = plt.axes(projection=ccrs.PlateCarree(central_longitude=clon))
mplmap.set_extent([lllon, urlon, lllat, urlat], ccrs.PlateCarree())
mplmap.coastlines('50m', facecolor='none')
# if catalog has magnitude data
if not catalog['mag'].isnull().all():
bins = [0, 5, 6, 7, 8, 15]
binnames = ['< 5', '5-6', '6-7', '7-8', r'$\geq$8']
binsizes = [10, 25, 50, 100, 400]
bincolors = ['g', 'b', 'y', 'r', 'r']
binmarks = ['o', 'o', 'o', 'o', '*']
catalog.loc[:, 'maggroup'] = pd.cut(catalog['mag'], bins,
labels=binnames)
for i, label in enumerate(binnames):
mgmask = catalog['maggroup'] == label
rcat = catalog[mgmask]
lons, lats = list(rcat['longitude']), list(rcat['latitude'])
if len(lons) > 0:
mplmap.scatter(lons, lats, s=binsizes[i], marker=binmarks[i],
c=bincolors[i], label=binnames[i], alpha=0.8,
zorder=10, transform=ccrs.PlateCarree())
plt.legend(loc='lower left', title='Magnitude')
# if catalog does not have magnitude data
else:
lons, lats = list(catalog['longitude']), list(catalog['latitude'])
mplmap.scatter(lons, lats, s=15, marker='x', c='r', zorder=10)
mplmap.add_feature(cfeature.NaturalEarthFeature('cultural',
'admin_1_states_provinces_lines', '50m', facecolor='none',
edgecolor='k', zorder=9))
mplmap.add_feature(cfeature.BORDERS)
plt.title(title, fontsize=20)
plt.subplots_adjust(left=0.05, right=0.95, top=0.95, bottom=0.05)
if mindep != -50:
plt.savefig('%s_morethan%sdetecs.png' % (dirname, mindep), dpi=300)
else:
plt.savefig('%s_mapdetecs.png' % dirname, dpi=300)
plt.close()
@printstatus('Mapping earthquake density')
def map_detec_nums(catalog, dirname, title='', numcolors=16, rmin=77, rmax=490,
minmag=-5, pltevents=True):
"""Map detections and a grid of detection density. rmax=510 is white,
rmin=0 is black.
"""
# generate bounds for map
mask = catalog['mag'] >= minmag
lllat, lllon, urlat, urlon, gridsize, hgridsize, _, clon = \
qcu.get_map_bounds(catalog[mask])
catalog = qcu.add_centers(catalog, gridsize)
groupedlatlons, _, cmax = qcu.group_lat_lons(catalog, minmag=minmag)
# print message if there are no detections with magnitudes above minmag
if cmax == 0:
print("No detections over magnitude %s" % minmag)
# create color gradient from light red to dark red
colors = qcu.range2rgb(rmin, rmax, numcolors)
# put each center into its corresponding color group
colorgroups = list(np.linspace(0, cmax, numcolors))
groupedlatlons.loc[:, 'group'] = np.digitize(groupedlatlons['count'],
colorgroups)
# create map
plt.figure(figsize=(12, 7))
mplmap = plt.axes(projection=ccrs.PlateCarree(central_longitude=clon))
mplmap.set_extent([lllon, urlon, lllat, urlat], ccrs.PlateCarree())
mplmap.coastlines('50m')
mplmap.add_feature(cfeature.BORDERS)
mplmap.add_feature(cfeature.NaturalEarthFeature('cultural',
'admin_1_states_provinces_lines', '50m', facecolor='none',
edgecolor='k', zorder=9))
plt.title(title, fontsize=20)
plt.subplots_adjust(left=0.01, right=0.9, top=0.95, bottom=0.05)
# create color map based on rmin and rmax
cmap = LinearSegmentedColormap.from_list('CM', colors)._resample(numcolors)
# make dummy plot for setting color bar
colormesh = mplmap.pcolormesh(colors, colors, colors, cmap=cmap, alpha=1,
vmin=0, vmax=cmax)
# format color bar
cbticks = [x for x in np.linspace(0, cmax, numcolors+1)]
cbar = plt.colorbar(colormesh, ticks=cbticks)
cbar.ax.set_yticklabels([('%.0f' % x) for x in cbticks])
cbar.set_label('# of detections', rotation=270, labelpad=15)
# plot rectangles with color corresponding to number of detections
for center, _, cgroup in groupedlatlons.itertuples():
minlat, maxlat = center[0]-hgridsize, center[0]+hgridsize
minlon, maxlon = center[1]-hgridsize, center[1]+hgridsize
glats = [minlat, maxlat, maxlat, minlat]
glons = [minlon, minlon, maxlon, maxlon]
color = colors[cgroup-1]
qcu.draw_grid(glats, glons, color, alpha=0.8)
# if provided, plot detection epicenters
if pltevents and not catalog['mag'].isnull().all():
magmask = catalog['mag'] >= minmag
lons = list(catalog['longitude'][magmask])
lats = list(catalog['latitude'][magmask])
mplmap.scatter(lons, lats, c='k', s=7, marker='x', zorder=5)
elif catalog['mag'].isnull().all():
lons = list(catalog['longitude'])
lats = list(catalog['latitude'])
mplmap.scatter(lons, lats, c='k', s=7, marker='x', zorder=5)
plt.savefig('%s_eqdensity.png' % dirname, dpi=300)
plt.close()
@printstatus('Making histogram of given parameter')
def make_hist(catalog, param, binsize, dirname, title='', xlabel='',
countlabel=False, maxval=None):
"""Plot histogram grouped by some parameter."""
paramlist = catalog[pd.notnull(catalog[param])][param].tolist()
minparam, maxparam = min(paramlist), max(paramlist)
paramdown = qcu.round2bin(minparam, binsize, 'down')
paramup = qcu.round2bin(maxparam, binsize, 'up')
numbins = int((paramup-paramdown) / binsize)
labelbuff = float(paramup-paramdown) / numbins * 0.5
diffs = [abs(paramlist[i+1]-paramlist[i]) for i in range(len(paramlist))
if i+1 < len(paramlist)]
diffs = [round(x, 1) for x in diffs if x > 0]
plt.figure(figsize=(10, 6))
plt.title(title, fontsize=20)
plt.xlabel(xlabel, fontsize=14)
plt.ylabel('Count', fontsize=14)
if param == 'ms':
parambins = np.linspace(paramdown, paramup, numbins+1)
plt.xlim(paramdown, paramup)
else:
parambins = np.linspace(paramdown, paramup+binsize,
numbins+2) - binsize/2.
plt.xlim(paramdown-binsize/2., paramup+binsize/2.)
phist = plt.hist(paramlist, parambins, alpha=0.7, color='b', edgecolor='k')
maxbarheight = max([phist[0][x] for x in range(numbins)] or [0])
labely = maxbarheight / 50.
plt.subplots_adjust(left=0.1, right=0.95, top=0.95, bottom=0.11)
if maxval:
plt.xlim(xmax=maxval)
plt.ylim(0, maxbarheight*1.1+0.1)
# put count numbers above the bars if countlabel=True
if countlabel:
for i in range(numbins):
plt.text(phist[1][i]+labelbuff, phist[0][i]+labely,
'%0.f' % phist[0][i], size=12, ha='center')
if maxval:
plt.savefig('%s_zoom%shistogram.png' % (dirname, param), dpi=300)
else:
plt.savefig('%s_%shistogram.png' % (dirname, param), dpi=300)
plt.close()
@printstatus('Making histogram of given time duration')
def make_time_hist(catalog, timelength, dirname, title=''):
"""Make histogram either by hour of the day or by date."""
timelist = catalog['time']
plt.figure(figsize=(10, 6))
plt.title(title, fontsize=20)
plt.ylabel('Count', fontsize=14)
if timelength == 'hour':
lons = np.linspace(-180, 180, 25).tolist()
hours = np.linspace(-12, 12, 25).tolist()
tlonlist = catalog.loc[:, ['longitude', 'time']]
tlonlist.loc[:, 'rLon'] = qcu.round2lon(tlonlist['longitude'])
tlonlist.loc[:, 'hour'] = [int(x.split('T')[1].split(':')[0])
for x in tlonlist['time']]
tlonlist.loc[:, 'rhour'] = [x.hour + hours[lons.index(x.rLon)]
for x in tlonlist.itertuples()]
tlonlist.loc[:, 'rhour'] = [x+24 if x < 0 else x-24 if x > 23 else x
for x in tlonlist['rhour']]
hourlist = tlonlist.rhour.tolist()
hourbins = np.linspace(-0.5, 23.5, 25)
plt.hist(hourlist, hourbins, alpha=1, color='b', edgecolor='k')
plt.xlabel('Hour of the Day', fontsize=14)
plt.xlim(-0.5, 23.5)
elif timelength == 'day':
daylist = [x.split('T')[0] for x in timelist]
daydf = pd.DataFrame({'date': daylist})
daydf['date'] = daydf['date'].astype('datetime64[ns]')
daydf = daydf.groupby([daydf['date'].dt.year,
daydf['date'].dt.month,
daydf['date'].dt.day]).count()
eqdates = daydf.index.tolist()
counts = daydf.date.tolist()
eqdates = [date(x[0], x[1], x[2]) for x in eqdates]
minday, maxday = min(eqdates), max(eqdates)
plt.bar(eqdates, counts, alpha=1, color='b', width=1)
plt.xlabel('Date', fontsize=14)
plt.xlim(minday, maxday)
plt.subplots_adjust(left=0.1, right=0.95, top=0.95, bottom=0.11)
plt.savefig('%s_%shistogram.png' % (dirname, timelength), dpi=300)
plt.close()
@printstatus('Graphing mean time separation')
def graph_time_sep(catalog, dirname):
"""Make bar graph of mean time separation between events by date."""
catalog.loc[:, 'convtime'] = [' '.join(x.split('T')).split('.')[0]
for x in catalog['time'].tolist()]
catalog.loc[:, 'convtime'] = catalog['convtime'].astype('datetime64[ns]')
catalog.loc[:, 'dt'] = catalog.convtime.diff().astype('timedelta64[ns]')
catalog.loc[:, 'dtmin'] = catalog['dt'] / pd.Timedelta(minutes=1)
mindate = catalog['convtime'].min()
maxdate = catalog['convtime'].max()
fig = plt.figure(figsize=(10, 6))
axfull = fig.add_subplot(111)
axfull.set_ylabel('Time separation (min)', fontsize=14, labelpad=20)
axfull.spines['top'].set_color('none')
axfull.spines['bottom'].set_color('none')
axfull.spines['left'].set_color('none')
axfull.spines['right'].set_color('none')
axfull.tick_params(labelcolor='w', top='off', bottom='off',
left='off', right='off')
if maxdate - mindate < pd.Timedelta(days=1460):
# time separation between events
fig.add_subplot(311)
plt.plot(catalog['convtime'], catalog['dtmin'], alpha=1, color='b')
plt.xlabel('Date')
plt.title('Time separation between events')
plt.xlim(mindate, maxdate)
plt.ylim(0)
# maximum monthly time separation
fig.add_subplot(312)
month_max = catalog.resample('1M', on='convtime').max()['dtmin']
months = month_max.index.map(lambda x: x.strftime('%Y-%m')).tolist()
months = [date(int(x[:4]), int(x[-2:]), 1) for x in months]
plt.bar(months, month_max.tolist(), color='b', alpha=1, width=31,
edgecolor='k')
plt.xlabel('Month')
plt.title('Maximum event separation by month')
plt.xlim(mindate - pd.Timedelta(days=15),
maxdate - pd.Timedelta(days=16))
# median monthly time separation
fig.add_subplot(313)
month_med = catalog.resample('1M', on='convtime').median()['dtmin']
plt.bar(months, month_med.tolist(), color='b', alpha=1, width=31,
edgecolor='k')
plt.xlabel('Month')
plt.title('Median event separation by month')
plt.tight_layout()
plt.xlim(mindate - pd.Timedelta(days=15),
maxdate - pd.Timedelta(days=16))
else:
# time separation between events
fig.add_subplot(311)
plt.plot(catalog['convtime'], catalog['dtmin'], alpha=1, color='b')
plt.xlabel('Date')
plt.title('Time separation between events')
plt.xlim(mindate, maxdate)
plt.ylim(0)
# maximum yearly time separation
fig.add_subplot(312)
year_max = catalog.resample('1Y', on='convtime').max()['dtmin']
years = year_max.index.map(lambda x: x.strftime('%Y')).tolist()
years = [date(int(x[:4]), 1, 1) for x in years]
plt.bar(years, year_max.tolist(), color='b', alpha=1, width=365,
edgecolor='k')
plt.xlabel('Year')
plt.title('Maximum event separation by year')
plt.xlim(mindate - pd.Timedelta(days=183),
maxdate - pd.Timedelta(days=183))
# median yearly time separation
fig.add_subplot(313)
year_med = catalog.resample('1Y', on='convtime').median()['dtmin']
plt.bar(years, year_med.tolist(), color='b', alpha=1, width=365,
edgecolor='k')
plt.xlabel('Year')
plt.title('Median event separation by year')
plt.tight_layout()
plt.xlim(mindate - pd.Timedelta(days=183),
maxdate - pd.Timedelta(days=183))
plt.savefig('%s_timeseparation.png' % dirname, dpi=300)
plt.close()
@printstatus('Graphing median magnitude by time')
def med_mag(catalog, dirname):
"""Make a bar graph of median event magnitude by year."""
catalog.loc[:, 'convtime'] = [' '.join(x.split('T')).split('.')[0]
for x in catalog['time'].tolist()]
catalog.loc[:, 'convtime'] = catalog['convtime'].astype('datetime64[ns]')
mindate = catalog['convtime'].min()
maxdate = catalog['convtime'].max()
if maxdate - mindate < pd.Timedelta(days=1460):
month_max = catalog.resample('1M', on='convtime').max()['mag']
months = month_max.index.map(lambda x: x.strftime('%Y-%m')).tolist()
months = [date(int(x[:4]), int(x[-2:]), 1) for x in months]
month_medmag = catalog.resample('1M', on='convtime').median()['mag']
fig = plt.figure(figsize=(10, 6))
ax = fig.add_subplot(111)
ax.tick_params(bottom='off')
plt.bar(months, month_medmag.tolist(), color='b', edgecolor='k',
alpha=1, width=31)
plt.xlabel('Month', fontsize=14)
plt.ylabel('Magnitude', fontsize=14)
plt.title('Monthly Median Magnitude', fontsize=20)
plt.xlim(min(months) - pd.Timedelta(days=15),
max(months) + pd.Timedelta(days=15))
else:
year_max = catalog.resample('1Y', on='convtime').max()['mag']
years = year_max.index.map(lambda x: x.strftime('%Y')).tolist()
years = [date(int(x[:4]), 1, 1) for x in years]
year_medmag = catalog.resample('1Y', on='convtime').median()['mag']
fig = plt.figure(figsize=(10, 6))
ax = fig.add_subplot(111)
ax.tick_params(bottom='off')
plt.bar(years, year_medmag.tolist(), color='b', edgecolor='k', alpha=1,
width=365)
plt.xlabel('Year', fontsize=14)
plt.ylabel('Magnitude', fontsize=14)
plt.title('Yearly Median Magnitude', fontsize=20)
plt.xlim(min(years) - pd.Timedelta(days=183),
max(years) - pd.Timedelta(days=183))
plt.savefig('%s_medianmag' % dirname, dpi=300)
plt.close()
@printstatus('Graphing magnitude completeness')
def cat_mag_comp(catalog, dirname, magbin=0.1):
"""Plot catalog magnitude completeness."""
catalog = catalog[pd.notnull(catalog['mag'])]
mags = np.array(catalog['mag'])
mags = np.around(mags, 1)
minmag, maxmag = min(min(mags), 0), max(mags)
mag_centers = np.arange(minmag, maxmag + 2*magbin, magbin)
cdf = np.zeros(len(mag_centers))
for idx in range(len(cdf)):
cdf[idx] = np.count_nonzero(
~np.isnan(mags[mags >= mag_centers[idx]-0.001]))
mag_edges = np.arange(minmag - magbin/2., maxmag+magbin, magbin)
g_r, _ = np.histogram(mags, mag_edges)
idx = list(g_r).index(max(g_r))
mc_est = mag_centers[idx]
try:
mc_est, bvalue, avalue, lval, mc_bins, std_dev = qcu.WW2000(mc_est,
mags, magbin)
except:
mc_est = mc_est + 0.3
mc_bins = np.arange(0, maxmag + magbin/2., magbin)
bvalue = np.log10(np.exp(1))/(np.average(mags[mags >= mc_est])
- (mc_est-magbin/2.))
avalue = np.log10(len(mags[mags >= mc_est])) + bvalue*mc_est
log_l = avalue-bvalue*mc_bins
lval = 10.**log_l
std_dev = bvalue/sqrt(len(mags[mags >= mc_est]))
plt.figure(figsize=(8, 6))
plt.scatter(mag_centers[:len(g_r)], g_r, edgecolor='r', marker='o',
facecolor='none', label='Incremental')
plt.scatter(mag_centers, cdf, c='k', marker='+', label='Cumulative')
plt.axvline(mc_est, c='r', linestyle='--', label='Mc = %2.1f' % mc_est)
plt.plot(mc_bins, lval, c='k', linestyle='--',
label='B = %1.3f%s%1.3f' % (bvalue, u'\u00B1', std_dev))
ax1 = plt.gca()
ax1.set_yscale('log')
max_count = np.amax(cdf) + 100000
ax1.set_xlim([minmag, maxmag])
ax1.set_ylim([1, max_count])
plt.title('Frequency-Magnitude Distribution', fontsize=18)
plt.xlabel('Magnitude', fontsize=14)
plt.ylabel('Log10 Count', fontsize=14)
plt.legend(numpoints=1)
plt.savefig('%s_catmagcomp.png' % dirname, dpi=300)
plt.close()
@printstatus('Graphing magnitude versus time for each earthquake')
def graph_mag_time(catalog, dirname):
"""Plot magnitudes vs. origin time."""
catalog = catalog[pd.notnull(catalog['mag'])]
catalog.loc[:, 'convtime'] = [' '.join(x.split('T')).split('.')[0]
for x in catalog['time'].tolist()]
catalog.loc[:, 'convtime'] = catalog['convtime'].astype('datetime64[ns]')
times = catalog['time'].copy()
mags = catalog['mag'].copy()
plt.figure(figsize=(10, 6))
plt.xlabel('Date', fontsize=14)
plt.ylabel('Magnitude', fontsize=14)
plt.plot_date(times, mags, alpha=0.7, markersize=2, c='b')
plt.xlim(min(times), max(times))
plt.title('Magnitude vs. Time', fontsize=20)
plt.savefig('%s_magvtime.png' % dirname, dpi=300)
plt.close()
@printstatus('Graphing event count by date and magnitude')
def graph_mag_count(catalog, dirname):
"""Graph event count grouped by magnitude and by date."""
catalog.loc[:, 'convtime'] = [' '.join(x.split('T')).split('.')[0]
for x in catalog['time'].tolist()]
catalog.loc[:, 'convtime'] = catalog['convtime'].astype('datetime64[ns]')
mindate, maxdate = catalog['convtime'].min(), catalog['convtime'].max()
bincond = maxdate - mindate < pd.Timedelta(days=1460)
barwidth = 31 if bincond else 365
timedelt = pd.Timedelta(days=barwidth/2.)
minbin = qcu.round2bin(catalog['mag'].min()-0.1, 1, 'down')
maxbin = qcu.round2bin(catalog['mag'].max()+0.1, 1, 'up')
bins = np.arange(minbin, maxbin+0.1, 1)
catalog.loc[:, 'magbin'] = pd.cut(catalog['mag'], bins=bins, right=True)
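    # Added note: pd.cut labels each magnitude with the one-unit-wide, right-closed
    # interval that contains it, so each subplot below corresponds to one magnitude bin.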
maggroups = catalog['magbin'].sort_values().unique()
fig, axlist = plt.subplots(len(maggroups), sharex=True)
fig.set_size_inches(10, 14, forward=True)
for i, mbin in enumerate(maggroups):
trimcat = catalog[catalog['magbin'] == mbin]
if len(trimcat) == 0:
continue
datelist = [x.split('T')[0] for x in trimcat['time']]
datedf = pd.DataFrame({'date': datelist})
datedf['date'] = datedf['date'].astype('datetime64[ns]')
datedf = datedf.groupby([datedf['date'].dt.year,
datedf['date'].dt.month]).count() if bincond\
else datedf.groupby([datedf['date'].dt.year]).count()
evdates = datedf.index.tolist()
counts = datedf.date.tolist()
evdates = [date(x[0], x[1], 1) if bincond else date(x, 1, 1)
for x in evdates]
axlist[i].bar(evdates, counts, alpha=1, color='b', width=barwidth,
edgecolor='k')
axlist[i].set_ylabel('%d-%d' % (bins[i], bins[i+1]), fontsize=10)
plt.xlim(mindate - timedelt, maxdate - timedelt)
plt.ylim(0, max(counts))
axlist[i].get_yaxis().set_label_coords(-0.1, 0.5)
plt.xlabel('Date', fontsize=14)
for ax in axlist[:-1]:
ax.xaxis.set_ticks_position('none')
plt.savefig('%s_magtimecount.png' % dirname, dpi=300)
plt.close()
@printstatus('Graphing cumulative moment release')
def cumul_moment_release(catalog, dirname):
"""Graph cumulative moment release."""
catalog = catalog[pd.notnull(catalog['mag'])]
catalog.loc[:, 'convtime'] = [' '.join(x.split('T')).split('.')[0]
for x in catalog['time'].tolist()]
catalog.loc[:, 'convtime'] = catalog['convtime'].astype('datetime64[ns]')
times = catalog['convtime']
minday, maxday = min(times), max(times)
mag0 = 10.**((3/2.)*(catalog['mag']+10.7))
mag0 = mag0 * 10.**(-7)
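    # Added note: this is the Hanks & Kanamori (1979) moment-magnitude relation,
    # M0 [dyne*cm] = 10**(1.5*(Mw + 10.7)); the 1e-7 factor converts dyne*cm to N*m.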
cumulmag0 = np.cumsum(mag0)
plt.figure(figsize=(10, 6))
plt.plot(times, cumulmag0, 'k-')
plt.xlabel('Date', fontsize=14)
plt.ylabel(r'Cumulative Moment Release (N$\times$m)', fontsize=14)
plt.xlim(minday, maxday)
plt.ylim(0)
plt.title('Cumulative Moment Release', fontsize=20)
colors = ['r', 'm', 'c', 'y', 'g']
largesteqs = catalog.sort_values('mag').tail(5)
for i, eq in enumerate(largesteqs.itertuples()):
plt.axvline(x=eq.time, color=colors[i], linestyle='--')
plt.savefig('%s_cumulmomentrelease.png' % dirname, dpi=300)
plt.close()
@printstatus('Graphing cumulative event types')
def graph_event_types(catalog, dirname):
"""Graph number of cumulative events by type of event."""
typedict = {}
for evtype in catalog['type'].unique():
typedict[evtype] = (catalog['type'] == evtype).cumsum()
plt.figure(figsize=(12, 6))
for evtype in typedict:
plt.plot_date(catalog['time'], typedict[evtype], marker=None,
linestyle='-', label=evtype)
plt.yscale('log')
plt.legend()
plt.xlim(min(catalog['time']), max(catalog['time']))
plt.xlabel('Date', fontsize=14)
plt.ylabel('Cumulative number of events', fontsize=14)
plt.title('Cumulative Event Type', fontsize=20)
plt.savefig('%s_cumuleventtypes.png' % dirname, dpi=300)
plt.close()
@printstatus('Graphing possible number of duplicate events')
def cat_dup_search(catalog, dirname):
"""Graph possible number of duplicate events given various distances
and time differences.
"""
epochtimes = [qcu.to_epoch(row.time) for row in catalog.itertuples()]
tdifsec = np.asarray(abs(np.diff(epochtimes)))
lat1 = np.asarray(catalog.latitude[:-1])
lon1 = np.asarray(catalog.longitude[:-1])
lat2 = np.asarray(catalog.latitude[1:])
lon2 = np.asarray(catalog.longitude[1:])
ddelkm = [gps2dist_azimuth(lat1[i], lon1[i], lat2[i], lon2[i])[0] / 1000.
for i in range(len(lat1))]
diffdf = pd.DataFrame({'tdifsec': tdifsec, 'ddelkm': ddelkm})
kmlimits = [1, 2, 4, 8, 16, 32, 64, 128, 256]
tmax = 16
dtime = 0.05
timebins = np.arange(0, tmax+dtime/2, dtime)
numevents = np.empty([len(kmlimits), len(timebins)-1])
for jdx in range(len(kmlimits)):
cat_subset = diffdf[diffdf.ddelkm <= kmlimits[jdx]]
for idx in range(len(timebins)-1):
numevents[jdx][idx] = cat_subset[cat_subset.tdifsec.between(
timebins[idx], timebins[idx+1])].count()[0]
totmatch = np.transpose(np.cumsum(np.transpose(numevents), axis=0))
plt.figure(figsize=(10, 6))
for idx in range(len(kmlimits)):
times = timebins[1:]
matches = totmatch[idx]
lab = str(kmlimits[idx]) + ' km'
plt.plot(times, matches, label=lab)
plt.xlabel('Time (s)', fontsize=14)
plt.ylabel('Possible duplicate events', fontsize=14)
plt.xlim(0, tmax)
plt.ylim(0, np.amax(totmatch)+0.5)
plt.legend(loc=2, numpoints=1)
plt.title(('Cumulative number of events within X seconds\n'
'and Z km (Z specified in legend)'), fontsize=20)
plt.savefig('%s_catdupsearch.png' % dirname, dpi=300)
plt.close()
###############################################################################
###############################################################################
###############################################################################
def create_figures():
"""Generate and save all relevant figures and text files."""
parser = argparse.ArgumentParser()
parser.add_argument('catalog', nargs='?', type=str,
help='pick which catalog to download data from; to \
download data from all catalogs, use "preferred"')
parser.add_argument('startyear', nargs='?', type=int,
help='pick starting year')
parser.add_argument('endyear', nargs='?', type=int,
help='pick end year (to get a single year of data, \
enter same year as startyear)')
parser.add_argument('-mr', '--magrange', type=float, nargs=2,
default=[-5, 12],
help='give the magnitude range for downloading data \
(default range is from -5 to 12)')
parser.add_argument('-tw', '--timewindow', type=float, default=2,
help='change time window for finding duplicates \
(default is 2 seconds)')
parser.add_argument('-dw', '--distwindow', type=float, default=15,
help='change distance window for finding duplicates \
(default is 15 kilometers)')
parser.add_argument('-sf', '--specifyfile', type=str,
help='specify existing .csv file to use')
parser.add_argument('-fd', '--forcedownload', action='store_true',
help='forces downloading of data even if .csv file \
exists')
args = parser.parse_args()
minmag, maxmag = args.magrange
if args.specifyfile is None:
if not args.catalog:
sys.stdout.write('No catalog specified. Exiting...\n')
sys.exit()
elif not args.startyear:
sys.stdout.write('No starting year specified. Exiting...\n')
sys.exit()
elif not args.endyear:
sys.stdout.write('No ending year specified. Exiting...\n')
sys.exit()
catalog = args.catalog.lower()
startyear, endyear = map(int, [args.startyear, args.endyear])
download = args.forcedownload
dirname = '%s%s-%s' % (catalog, startyear, endyear) if catalog else\
'preferred%s-%s' % (startyear, endyear)
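# dirname doubles as the output directory and file prefix, e.g. 'us2010-2015' for the 'us' catalog.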
if download:
try:
os.makedirs(dirname)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
datadf = qcu.get_data(catalog, dirname, startyear=startyear,
endyear=endyear, minmag=minmag, maxmag=maxmag)
else:
# Python 2
try:
try:
datadf = pd.read_csv('%s/%s.csv' % (dirname, dirname))
except IOError:
try:
os.makedirs(dirname)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
datadf = qcu.get_data(catalog, dirname,
startyear=startyear, endyear=endyear, minmag=minmag,
maxmag=maxmag)
# Python 3
except:
try:
datadf = pd.read_csv('%s/%s.csv' % (dirname, dirname))
except FileNotFoundError:
try:
os.makedirs(dirname)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
datadf = qcu.get_data(catalog, dirname,
startyear=startyear, endyear=endyear, minmag=minmag,
maxmag=maxmag)
else:
from shutil import copy2
dirname = '.'.join(args.specifyfile.split('.')[:-1])
try:
os.makedirs(dirname)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
datadf = pd.read_csv(args.specifyfile)
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import operator
from itertools import product, starmap
from numpy import nan, inf
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, isnull, bdate_range,
NaT, date_range, timedelta_range,
_np_version_under1p8)
from pandas.tseries.index import Timestamp
from pandas.tseries.tdi import Timedelta
import pandas.core.nanops as nanops
from pandas.compat import range, zip
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from .common import TestData
class TestSeriesOperators(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_comparisons(self):
left = np.random.randn(10)
right = np.random.randn(10)
left[:3] = np.nan
result = nanops.nangt(left, right)
with np.errstate(invalid='ignore'):
expected = (left > right).astype('O')
expected[:3] = np.nan
assert_almost_equal(result, expected)
s = Series(['a', 'b', 'c'])
s2 = Series([False, True, False])
# it works!
exp = Series([False, False, False])
tm.assert_series_equal(s == s2, exp)
tm.assert_series_equal(s2 == s, exp)
def test_op_method(self):
def check(series, other, check_reverse=False):
simple_ops = ['add', 'sub', 'mul', 'floordiv', 'truediv', 'pow']
if not compat.PY3:
simple_ops.append('div')
for opname in simple_ops:
op = getattr(Series, opname)
if opname == 'div':
alt = operator.truediv
else:
alt = getattr(operator, opname)
result = op(series, other)
expected = alt(series, other)
tm.assert_almost_equal(result, expected)
if check_reverse:
rop = getattr(Series, "r" + opname)
result = rop(series, other)
expected = alt(other, series)
tm.assert_almost_equal(result, expected)
check(self.ts, self.ts * 2)
check(self.ts, self.ts[::2])
check(self.ts, 5, check_reverse=True)
check(tm.makeFloatSeries(), tm.makeFloatSeries(), check_reverse=True)
def test_neg(self):
assert_series_equal(-self.series, -1 * self.series)
def test_invert(self):
assert_series_equal(-(self.series < 0), ~(self.series < 0))
def test_div(self):
with np.errstate(all='ignore'):
# no longer do integer div for any ops, but deal with the 0's
p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
result = p['first'] / p['second']
expected = Series(
p['first'].values.astype(float) / p['second'].values,
dtype='float64')
expected.iloc[0:3] = np.inf
assert_series_equal(result, expected)
result = p['first'] / 0
expected = Series(np.inf, index=p.index, name='first')
assert_series_equal(result, expected)
p = p.astype('float64')
result = p['first'] / p['second']
expected = Series(p['first'].values / p['second'].values)
assert_series_equal(result, expected)
p = DataFrame({'first': [3, 4, 5, 8], 'second': [1, 1, 1, 1]})
result = p['first'] / p['second']
assert_series_equal(result, p['first'].astype('float64'),
check_names=False)
self.assertTrue(result.name is None)
self.assertFalse(np.array_equal(result, p['second'] / p['first']))
# inf signing
s = Series([np.nan, 1., -1.])
result = s / 0
expected = Series([np.nan, np.inf, -np.inf])
assert_series_equal(result, expected)
# float/integer issue
# GH 7785
p = DataFrame({'first': (1, 0), 'second': (-0.01, -0.02)})
expected = Series([-0.01, -np.inf])
result = p['second'].div(p['first'])
assert_series_equal(result, expected, check_names=False)
result = p['second'] / p['first']
assert_series_equal(result, expected)
# GH 9144
s = Series([-1, 0, 1])
result = 0 / s
expected = Series([0.0, nan, 0.0])
assert_series_equal(result, expected)
result = s / 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
result = s // 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
def test_operators(self):
def _check_op(series, other, op, pos_only=False,
check_dtype=True):
left = np.abs(series) if pos_only else series
right = np.abs(other) if pos_only else other
cython_or_numpy = op(left, right)
python = left.combine(right, op)
tm.assert_series_equal(cython_or_numpy, python,
check_dtype=check_dtype)
def check(series, other):
simple_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'mod']
for opname in simple_ops:
_check_op(series, other, getattr(operator, opname))
_check_op(series, other, operator.pow, pos_only=True)
_check_op(series, other, lambda x, y: operator.add(y, x))
_check_op(series, other, lambda x, y: operator.sub(y, x))
_check_op(series, other, lambda x, y: operator.truediv(y, x))
_check_op(series, other, lambda x, y: operator.floordiv(y, x))
_check_op(series, other, lambda x, y: operator.mul(y, x))
_check_op(series, other, lambda x, y: operator.pow(y, x),
pos_only=True)
_check_op(series, other, lambda x, y: operator.mod(y, x))
check(self.ts, self.ts * 2)
check(self.ts, self.ts * 0)
check(self.ts, self.ts[::2])
check(self.ts, 5)
def check_comparators(series, other, check_dtype=True):
_check_op(series, other, operator.gt, check_dtype=check_dtype)
_check_op(series, other, operator.ge, check_dtype=check_dtype)
_check_op(series, other, operator.eq, check_dtype=check_dtype)
_check_op(series, other, operator.lt, check_dtype=check_dtype)
_check_op(series, other, operator.le, check_dtype=check_dtype)
check_comparators(self.ts, 5)
check_comparators(self.ts, self.ts + 1, check_dtype=False)
def test_operators_empty_int_corner(self):
s1 = Series([], [], dtype=np.int32)
s2 = Series({'x': 0.})
tm.assert_series_equal(s1 * s2, Series([np.nan], index=['x']))
def test_operators_timedelta64(self):
# invalid ops
self.assertRaises(Exception, self.objSeries.__add__, 1)
self.assertRaises(Exception, self.objSeries.__add__,
np.array(1, dtype=np.int64))
self.assertRaises(Exception, self.objSeries.__sub__, 1)
self.assertRaises(Exception, self.objSeries.__sub__,
np.array(1, dtype=np.int64))
# series ops
v1 = date_range('2012-1-1', periods=3, freq='D')
v2 = date_range('2012-1-2', periods=3, freq='D')
rs = Series(v2) - Series(v1)
xp = Series(1e9 * 3600 * 24,
rs.index).astype('int64').astype('timedelta64[ns]')
assert_series_equal(rs, xp)
self.assertEqual(rs.dtype, 'timedelta64[ns]')
df = DataFrame(dict(A=v1))
td = Series([timedelta(days=i) for i in range(3)])
self.assertEqual(td.dtype, 'timedelta64[ns]')
# series on the rhs
result = df['A'] - df['A'].shift()
self.assertEqual(result.dtype, 'timedelta64[ns]')
result = df['A'] + td
self.assertEqual(result.dtype, 'M8[ns]')
# scalar Timestamp on rhs
maxa = df['A'].max()
tm.assertIsInstance(maxa, Timestamp)
resultb = df['A'] - df['A'].max()
self.assertEqual(resultb.dtype, 'timedelta64[ns]')
# timestamp on lhs
result = resultb + df['A']
values = [Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')]
expected = Series(values, name='A')
assert_series_equal(result, expected)
# datetimes on rhs
result = df['A'] - datetime(2001, 1, 1)
expected = Series(
[timedelta(days=4017 + i) for i in range(3)], name='A')
assert_series_equal(result, expected)
self.assertEqual(result.dtype, 'm8[ns]')
d = datetime(2001, 1, 1, 3, 4)
resulta = df['A'] - d
self.assertEqual(resulta.dtype, 'm8[ns]')
# roundtrip
resultb = resulta + d
assert_series_equal(df['A'], resultb)
# timedeltas on rhs
td = timedelta(days=1)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(resultb, df['A'])
self.assertEqual(resultb.dtype, 'M8[ns]')
# roundtrip
td = timedelta(minutes=5, seconds=3)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(df['A'], resultb)
self.assertEqual(resultb.dtype, 'M8[ns]')
# inplace
value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1))
rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1))
self.assertEqual(rs[2], value)
def test_operator_series_comparison_zerorank(self):
# GH 13006
result = np.float64(0) > pd.Series([1, 2, 3])
expected = 0.0 > pd.Series([1, 2, 3])
self.assert_series_equal(result, expected)
result = pd.Series([1, 2, 3]) < np.float64(0)
expected = pd.Series([1, 2, 3]) < 0.0
self.assert_series_equal(result, expected)
result = np.array([0, 1, 2])[0] > pd.Series([0, 1, 2])
expected = 0.0 > pd.Series([1, 2, 3])
self.assert_series_equal(result, expected)
def test_timedeltas_with_DateOffset(self):
# GH 4532
# operate with pd.offsets
s = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')])
result = s + pd.offsets.Second(5)
result2 = pd.offsets.Second(5) + s
expected = Series([Timestamp('20130101 9:01:05'), Timestamp(
'20130101 9:02:05')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s - pd.offsets.Second(5)
result2 = -pd.offsets.Second(5) + s
expected = Series([Timestamp('20130101 9:00:55'), Timestamp(
'20130101 9:01:55')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Milli(5)
result2 = pd.offsets.Milli(5) + s
expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(
'20130101 9:02:00.005')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)
expected = Series([Timestamp('20130101 9:06:00.005'), Timestamp(
'20130101 9:07:00.005')])
assert_series_equal(result, expected)
# operate with np.timedelta64 correctly
result = s + np.timedelta64(1, 's')
result2 = np.timedelta64(1, 's') + s
expected = Series([Timestamp('20130101 9:01:01'), Timestamp(
'20130101 9:02:01')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + np.timedelta64(5, 'ms')
result2 = np.timedelta64(5, 'ms') + s
expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(
'20130101 9:02:00.005')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
s + op(5)
op(5) + s
def test_timedelta_series_ops(self):
# GH11925
s = Series(timedelta_range('1 day', periods=3))
ts = Timestamp('2012-01-01')
expected = Series(date_range('2012-01-02', periods=3))
assert_series_equal(ts + s, expected)
assert_series_equal(s + ts, expected)
expected2 = Series(date_range('2011-12-31', periods=3, freq='-1D'))
assert_series_equal(ts - s, expected2)
assert_series_equal(ts + (-s), expected2)
def test_timedelta64_operations_with_DateOffset(self):
# GH 10699
td = Series([timedelta(minutes=5, seconds=3)] * 3)
result = td + pd.offsets.Minute(1)
expected = Series([timedelta(minutes=6, seconds=3)] * 3)
assert_series_equal(result, expected)
result = td - pd.offsets.Minute(1)
expected = Series([timedelta(minutes=4, seconds=3)] * 3)
assert_series_equal(result, expected)
result = td + Series([pd.offsets.Minute(1), pd.offsets.Second(3),
pd.offsets.Hour(2)])
expected = Series([timedelta(minutes=6, seconds=3), timedelta(
minutes=5, seconds=6), timedelta(hours=2, minutes=5, seconds=3)])
assert_series_equal(result, expected)
result = td + pd.offsets.Minute(1) + pd.offsets.Second(12)
expected = Series([timedelta(minutes=6, seconds=15)] * 3)
assert_series_equal(result, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
td + op(5)
op(5) + td
td - op(5)
op(5) - td
def test_timedelta64_operations_with_timedeltas(self):
# td operate with td
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td2 = timedelta(minutes=5, seconds=4)
result = td1 - td2
expected = Series([timedelta(seconds=0)] * 3) - Series([timedelta(
seconds=1)] * 3)
self.assertEqual(result.dtype, 'm8[ns]')
assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) - Series([timedelta(
seconds=0)] * 3))
assert_series_equal(result2, expected)
# roundtrip
assert_series_equal(result + td2, td1)
# Now again, using pd.to_timedelta, which should build
# a Series or a scalar, depending on input.
td1 = Series(pd.to_timedelta(['00:05:03'] * 3))
td2 = pd.to_timedelta('00:05:04')
result = td1 - td2
expected = Series([timedelta(seconds=0)] * 3) - Series([timedelta(
seconds=1)] * 3)
self.assertEqual(result.dtype, 'm8[ns]')
assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) - Series([timedelta(
seconds=0)] * 3))
assert_series_equal(result2, expected)
# roundtrip
assert_series_equal(result + td2, td1)
def test_timedelta64_operations_with_integers(self):
# GH 4521
# divide/multiply by integers
startdate = Series(date_range('2013-01-01', '2013-01-03'))
enddate = Series(date_range('2013-03-01', '2013-03-03'))
s1 = enddate - startdate
s1[2] = np.nan
s2 = Series([2, 3, 4])
expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 / s2
assert_series_equal(result, expected)
s2 = Series([20, 30, 40])
expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 / s2
assert_series_equal(result, expected)
result = s1 / 2
expected = Series(s1.values.astype(np.int64) / 2, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
s2 = Series([20, 30, 40])
expected = Series(s1.values.astype(np.int64) * s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 * s2
assert_series_equal(result, expected)
for dtype in ['int32', 'int16', 'uint32', 'uint64', 'uint32', 'uint16',
'uint8']:
s2 = Series([20, 30, 40], dtype=dtype)
expected = Series(
s1.values.astype(np.int64) * s2.astype(np.int64),
dtype='m8[ns]')
expected[2] = np.nan
result = s1 * s2
assert_series_equal(result, expected)
result = s1 * 2
expected = Series(s1.values.astype(np.int64) * 2, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
result = s1 * -1
expected = Series(s1.values.astype(np.int64) * -1, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
# invalid ops
assert_series_equal(s1 / s2.astype(float),
Series([Timedelta('2 days 22:48:00'), Timedelta(
'1 days 23:12:00'), Timedelta('NaT')]))
assert_series_equal(s1 / 2.0,
Series([Timedelta('29 days 12:00:00'), Timedelta(
'29 days 12:00:00'), Timedelta('NaT')]))
for op in ['__add__', '__sub__']:
sop = getattr(s1, op, None)
if sop is not None:
self.assertRaises(TypeError, sop, 1)
self.assertRaises(TypeError, sop, s2.values)
def test_timedelta64_conversions(self):
startdate = Series(date_range('2013-01-01', '2013-01-03'))
enddate = Series(date_range('2013-03-01', '2013-03-03'))
s1 = enddate - startdate
s1[2] = np.nan
for m in [1, 3, 10]:
for unit in ['D', 'h', 'm', 's', 'ms', 'us', 'ns']:
# op
expected = s1.apply(lambda x: x / np.timedelta64(m, unit))
result = s1 / np.timedelta64(m, unit)
assert_series_equal(result, expected)
if m == 1 and unit != 'ns':
# astype
result = s1.astype("timedelta64[{0}]".format(unit))
assert_series_equal(result, expected)
# reverse op
expected = s1.apply(
lambda x: Timedelta(np.timedelta64(m, unit)) / x)
result = np.timedelta64(m, unit) / s1
# astype
s = Series(date_range('20130101', periods=3))
result = s.astype(object)
self.assertIsInstance(result.iloc[0], datetime)
self.assertTrue(result.dtype == np.object_)
result = s1.astype(object)
self.assertIsInstance(result.iloc[0], timedelta)
self.assertTrue(result.dtype == np.object_)
def test_timedelta64_equal_timedelta_supported_ops(self):
ser = Series([Timestamp('20130301'), Timestamp('20130228 23:00:00'),
Timestamp('20130228 22:00:00'), Timestamp(
'20130228 21:00:00')])
intervals = 'D', 'h', 'm', 's', 'us'
# TODO: unused
# npy16_mappings = {'D': 24 * 60 * 60 * 1000000,
# 'h': 60 * 60 * 1000000,
# 'm': 60 * 1000000,
# 's': 1000000,
# 'us': 1}
def timedelta64(*args):
return sum(starmap(np.timedelta64, zip(args, intervals)))
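# numpy and stdlib timedeltas built from the same components must give identical results.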
for op, d, h, m, s, us in product([operator.add, operator.sub],
*([range(2)] * 5)):
nptd = timedelta64(d, h, m, s, us)
pytd = timedelta(days=d, hours=h, minutes=m, seconds=s,
microseconds=us)
lhs = op(ser, nptd)
rhs = op(ser, pytd)
try:
assert_series_equal(lhs, rhs)
except:
raise AssertionError(
"invalid comparison [op->{0},d->{1},h->{2},m->{3},"
"s->{4},us->{5}]\n{6}\n{7}\n".format(op, d, h, m, s,
us, lhs, rhs))
def test_operators_datetimelike(self):
def run_ops(ops, get_ser, test_ser):
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
for op_str in ops:
op = getattr(get_ser, op_str, None)
with tm.assertRaisesRegexp(TypeError, 'operate'):
op(test_ser)
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td2 = timedelta(minutes=5, seconds=4)
ops = ['__mul__', '__floordiv__', '__pow__', '__rmul__',
'__rfloordiv__', '__rpow__']
run_ops(ops, td1, td2)
td1 + td2
td2 + td1
td1 - td2
td2 - td1
td1 / td2
td2 / td1
# ## datetime64 ###
dt1 = Series([Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')])
dt1.iloc[2] = np.nan
dt2 = Series([Timestamp('20111231'), Timestamp('20120102'),
Timestamp('20120104')])
ops = ['__add__', '__mul__', '__floordiv__', '__truediv__', '__div__',
'__pow__', '__radd__', '__rmul__', '__rfloordiv__',
'__rtruediv__', '__rdiv__', '__rpow__']
run_ops(ops, dt1, dt2)
dt1 - dt2
dt2 - dt1
# ## datetime64 with timedelta ###
ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__',
'__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__',
'__rpow__']
run_ops(ops, dt1, td1)
dt1 + td1
td1 + dt1
dt1 - td1
# TODO: Decide if this ought to work.
# td1 - dt1
# ## timedelta with datetime64 ###
ops = ['__sub__', '__mul__', '__floordiv__', '__truediv__', '__div__',
'__pow__', '__rmul__', '__rfloordiv__', '__rtruediv__',
'__rdiv__', '__rpow__']
run_ops(ops, td1, dt1)
td1 + dt1
dt1 + td1
# 8260, 10763
# datetime64 with tz
ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__',
'__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__',
'__rpow__']
tz = 'US/Eastern'
dt1 = Series(date_range('2000-01-01 09:00:00', periods=5,
tz=tz), name='foo')
dt2 = dt1.copy()
dt2.iloc[2] = np.nan
td1 = Series(timedelta_range('1 days 1 min', periods=5, freq='H'))
td2 = td1.copy()
td2.iloc[1] = np.nan
run_ops(ops, dt1, td1)
result = dt1 + td1[0]
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt2 + td2[0]
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
# odd numpy behavior with scalar timedeltas
if not _np_version_under1p8:
result = td1[0] + dt1
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = td2[0] + dt2
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt1 - td1[0]
exp = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
self.assertRaises(TypeError, lambda: td1[0] - dt1)
result = dt2 - td2[0]
exp = (dt2.dt.tz_localize(None) - td2[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
self.assertRaises(TypeError, lambda: td2[0] - dt2)
result = dt1 + td1
exp = (dt1.dt.tz_localize(None) + td1).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt2 + td2
exp = (dt2.dt.tz_localize(None) + td2).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt1 - td1
exp = (dt1.dt.tz_localize(None) - td1).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt2 - td2
exp = (dt2.dt.tz_localize(None) - td2).dt.tz_localize(tz)
assert_series_equal(result, exp)
self.assertRaises(TypeError, lambda: td1 - dt1)
self.assertRaises(TypeError, lambda: td2 - dt2)
def test_sub_single_tz(self):
# GH12290
s1 = Series([pd.Timestamp('2016-02-10', tz='America/Sao_Paulo')])
s2 = Series([pd.Timestamp('2016-02-08', tz='America/Sao_Paulo')])
result = s1 - s2
expected = Series([Timedelta('2days')])
assert_series_equal(result, expected)
result = s2 - s1
expected = Series([Timedelta('-2days')])
assert_series_equal(result, expected)
def test_ops_nat(self):
# GH 11349
timedelta_series = Series([NaT, Timedelta('1s')])
datetime_series = Series([NaT, Timestamp('19900315')])
nat_series_dtype_timedelta = Series(
[NaT, NaT], dtype='timedelta64[ns]')
nat_series_dtype_timestamp = Series([NaT, NaT], dtype='datetime64[ns]')
single_nat_dtype_datetime = Series([NaT], dtype='datetime64[ns]')
single_nat_dtype_timedelta = Series([NaT], dtype='timedelta64[ns]')
# subtraction
assert_series_equal(timedelta_series - NaT, nat_series_dtype_timedelta)
assert_series_equal(-NaT + timedelta_series,
nat_series_dtype_timedelta)
assert_series_equal(timedelta_series - single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(-single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
assert_series_equal(datetime_series - NaT, nat_series_dtype_timestamp)
assert_series_equal(-NaT + datetime_series, nat_series_dtype_timestamp)
assert_series_equal(datetime_series - single_nat_dtype_datetime,
nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
-single_nat_dtype_datetime + datetime_series
assert_series_equal(datetime_series - single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(-single_nat_dtype_timedelta + datetime_series,
nat_series_dtype_timestamp)
# without a Series wrapping the NaT, it is ambiguous
# whether it is a datetime64 or timedelta64
# defaults to interpreting it as timedelta64
assert_series_equal(nat_series_dtype_timestamp - NaT,
nat_series_dtype_timestamp)
assert_series_equal(-NaT + nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timestamp -
single_nat_dtype_datetime,
nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
-single_nat_dtype_datetime + nat_series_dtype_timestamp
assert_series_equal(nat_series_dtype_timestamp -
single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(-single_nat_dtype_timedelta +
nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
with tm.assertRaises(TypeError):
timedelta_series - single_nat_dtype_datetime
# addition
assert_series_equal(nat_series_dtype_timestamp + NaT,
nat_series_dtype_timestamp)
assert_series_equal(NaT + nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timestamp +
single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(timedelta_series + NaT, nat_series_dtype_timedelta)
assert_series_equal(NaT + timedelta_series, nat_series_dtype_timedelta)
assert_series_equal(timedelta_series + single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timestamp + NaT,
nat_series_dtype_timestamp)
assert_series_equal(NaT + nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timestamp +
single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_datetime,
nat_series_dtype_timestamp)
assert_series_equal(single_nat_dtype_datetime +
nat_series_dtype_timedelta,
nat_series_dtype_timestamp)
# multiplication
assert_series_equal(nat_series_dtype_timedelta * 1.0,
nat_series_dtype_timedelta)
assert_series_equal(1.0 * nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(timedelta_series * 1, timedelta_series)
assert_series_equal(1 * timedelta_series, timedelta_series)
assert_series_equal(timedelta_series * 1.5,
Series([NaT, Timedelta('1.5s')]))
assert_series_equal(1.5 * timedelta_series,
Series([NaT, Timedelta('1.5s')]))
assert_series_equal(timedelta_series * nan, nat_series_dtype_timedelta)
assert_series_equal(nan * timedelta_series, nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
datetime_series * 1
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp * 1
with tm.assertRaises(TypeError):
datetime_series * 1.0
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp * 1.0
# division
assert_series_equal(timedelta_series / 2,
Series([NaT, Timedelta('0.5s')]))
assert_series_equal(timedelta_series / 2.0,
Series([NaT, Timedelta('0.5s')]))
assert_series_equal(timedelta_series / nan, nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp / 1.0
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp / 1
def test_ops_datetimelike_align(self):
# GH 7500
# datetimelike ops need to align
dt = Series(date_range('2012-1-1', periods=3, freq='D'))
dt.iloc[2] = np.nan
dt2 = dt[::-1]
expected = Series([timedelta(0), timedelta(0), pd.NaT])
# name is reset
result = dt2 - dt
assert_series_equal(result, expected)
expected = Series(expected, name=0)
result = (dt2.to_frame() - dt.to_frame())[0]
assert_series_equal(result, expected)
def test_object_comparisons(self):
s = Series(['a', 'b', np.nan, 'c', 'a'])
result = s == 'a'
expected = Series([True, False, False, False, True])
assert_series_equal(result, expected)
result = s < 'a'
expected = Series([False, False, False, False, False])
assert_series_equal(result, expected)
result = s != 'a'
expected = -(s == 'a')
assert_series_equal(result, expected)
def test_comparison_tuples(self):
# GH11339
# comparisons vs tuple
s = Series([(1, 1), (1, 2)])
result = s == (1, 2)
expected = Series([False, True])
assert_series_equal(result, expected)
result = s != (1, 2)
expected = Series([True, False])
assert_series_equal(result, expected)
result = s == (0, 0)
expected = Series([False, False])
assert_series_equal(result, expected)
result = s != (0, 0)
expected = Series([True, True])
assert_series_equal(result, expected)
s = Series([(1, 1), (1, 1)])
result = s == (1, 1)
expected = Series([True, True])
assert_series_equal(result, expected)
result = s != (1, 1)
expected = Series([False, False])
assert_series_equal(result, expected)
s = Series([frozenset([1]), frozenset([1, 2])])
result = s == frozenset([1])
expected = Series([True, False])
assert_series_equal(result, expected)
def test_comparison_operators_with_nas(self):
s = Series(bdate_range('1/1/2000', periods=10), dtype=object)
s[::2] = np.nan
# test that comparisons work
ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne']
for op in ops:
val = s[5]
f = getattr(operator, op)
result = f(s, val)
expected = f(s.dropna(), val).reindex(s.index)
if op == 'ne':
expected = expected.fillna(True).astype(bool)
else:
expected = expected.fillna(False).astype(bool)
assert_series_equal(result, expected)
# FIXME: reversed comparisons (scalar on the left) are not exercised yet:
# result = f(val, s)
# expected = f(val, s.dropna()).reindex(s.index)
# assert_series_equal(result, expected)
# boolean &, |, ^ should work with object arrays and propagate NAs
ops = ['and_', 'or_', 'xor']
mask = s.isnull()
for bool_op in ops:
f = getattr(operator, bool_op)
filled = s.fillna(s[0])
result = f(s < s[9], s > s[3])
expected = f(filled < filled[9], filled > filled[3])
expected[mask] = False
assert_series_equal(result, expected)
def test_comparison_object_numeric_nas(self):
s = Series(np.random.randn(10), dtype=object)
shifted = s.shift(2)
ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne']
for op in ops:
f = getattr(operator, op)
result = f(s, shifted)
expected = f(s.astype(float), shifted.astype(float))
assert_series_equal(result, expected)
def test_comparison_invalid(self):
# GH4968
# invalid date/int comparisons
s = Series(range(5))
s2 = Series(date_range('20010101', periods=5))
for (x, y) in [(s, s2), (s2, s)]:
self.assertRaises(TypeError, lambda: x == y)
self.assertRaises(TypeError, lambda: x != y)
self.assertRaises(TypeError, lambda: x >= y)
self.assertRaises(TypeError, lambda: x > y)
self.assertRaises(TypeError, lambda: x < y)
self.assertRaises(TypeError, lambda: x <= y)
def test_more_na_comparisons(self):
for dtype in [None, object]:
left = Series(['a', np.nan, 'c'], dtype=dtype)
right = Series(['a', np.nan, 'd'], dtype=dtype)
result = left == right
expected = Series([True, False, False])
assert_series_equal(result, expected)
result = left != right
expected = Series([False, True, True])
assert_series_equal(result, expected)
result = left == np.nan
expected = Series([False, False, False])
assert_series_equal(result, expected)
result = left != np.nan
expected = Series([True, True, True])
assert_series_equal(result, expected)
def test_nat_comparisons(self):
data = [([pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')],
[pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')]),
([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')],
[pd.NaT, pd.NaT, pd.Timedelta('3 days')]),
([pd.Period('2011-01', freq='M'), pd.NaT,
pd.Period('2011-03', freq='M')],
[pd.NaT, pd.NaT, pd.Period('2011-03', freq='M')])]
# add lhs / rhs switched data
data = data + [(r, l) for l, r in data]
for l, r in data:
for dtype in [None, object]:
left = Series(l, dtype=dtype)
# Series, Index
for right in [Series(r, dtype=dtype), Index(r, dtype=dtype)]:
expected = Series([False, False, True])
assert_series_equal(left == right, expected)
expected = Series([True, True, False])
assert_series_equal(left != right, expected)
expected = Series([False, False, False])
assert_series_equal(left < right, expected)
expected = Series([False, False, False])
assert_series_equal(left > right, expected)
expected = Series([False, False, True])
assert_series_equal(left >= right, expected)
expected = Series([False, False, True])
assert_series_equal(left <= right, expected)
def test_nat_comparisons_scalar(self):
data = [[pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')],
[pd.Timedelta('1 days'), pd.NaT, pd.Timedelta('3 days')],
[pd.Period('2011-01', freq='M'), pd.NaT,
pd.Period('2011-03', freq='M')]]
for l in data:
for dtype in [None, object]:
left = Series(l, dtype=dtype)
expected = Series([False, False, False])
assert_series_equal(left == pd.NaT, expected)
assert_series_equal(pd.NaT == left, expected)
expected = Series([True, True, True])
assert_series_equal(left != pd.NaT, expected)
assert_series_equal(pd.NaT != left, expected)
expected = Series([False, False, False])
assert_series_equal(left < pd.NaT, expected)
assert_series_equal(pd.NaT > left, expected)
assert_series_equal(left <= pd.NaT, expected)
assert_series_equal(pd.NaT >= left, expected)
assert_series_equal(left > pd.NaT, expected)
assert_series_equal(pd.NaT < left, expected)
assert_series_equal(left >= pd.NaT, expected)
assert_series_equal(pd.NaT <= left, expected)
def test_comparison_different_length(self):
a = Series(['a', 'b', 'c'])
b = Series(['b', 'a'])
self.assertRaises(ValueError, a.__lt__, b)
a = Series([1, 2])
b = Series([2, 3, 4])
self.assertRaises(ValueError, a.__eq__, b)
def test_comparison_label_based(self):
# GH 4947
# comparisons should be label based
a = Series([True, False, True], list('bca'))
b = Series([False, True, False], list('abc'))
expected = Series([False, True, False], list('abc'))
result = a & b
assert_series_equal(result, expected)
expected = Series([True, True, False], list('abc'))
result = a | b
assert_series_equal(result, expected)
expected = Series([True, False, False], list('abc'))
result = a ^ b
assert_series_equal(result, expected)
# rhs is bigger
a = Series([True, False, True], list('bca'))
b = Series([False, True, False, True], list('abcd'))
expected = Series([False, True, False, False], list('abcd'))
result = a & b
assert_series_equal(result, expected)
expected = Series([True, True, False, False], list('abcd'))
result = a | b
assert_series_equal(result, expected)
# filling
# vs empty
result = a & Series([])
expected = Series([False, False, False], list('bca'))
assert_series_equal(result, expected)
result = a | Series([])
expected = Series([True, False, True], list('bca'))
assert_series_equal(result, expected)
# vs non-matching
result = a & Series([1], ['z'])
expected = Series([False, False, False, False], list('abcz'))
assert_series_equal(result, expected)
result = a | Series([1], ['z'])
expected = Series([True, True, False, False], list('abcz'))
assert_series_equal(result, expected)
# identity
# we would like s[s|e] == s to hold for any e, whether empty or not
for e in [Series([]), Series([1], ['z']),
Series(np.nan, b.index), Series(np.nan, a.index)]:
result = a[a | e]
assert_series_equal(result, a[a])
for e in [Series(['z'])]:
if compat.PY3:
with tm.assert_produces_warning(RuntimeWarning):
result = a[a | e]
else:
result = a[a | e]
assert_series_equal(result, a[a])
# vs scalars
index = list('bca')
t = Series([True, False, True])
for v in [True, 1, 2]:
result = Series([True, False, True], index=index) | v
expected = Series([True, True, True], index=index)
assert_series_equal(result, expected)
for v in [np.nan, 'foo']:
self.assertRaises(TypeError, lambda: t | v)
for v in [False, 0]:
result = Series([True, False, True], index=index) | v
expected = Series([True, False, True], index=index)
assert_series_equal(result, expected)
for v in [True, 1]:
result = Series([True, False, True], index=index) & v
expected = Series([True, False, True], index=index)
assert_series_equal(result, expected)
for v in [False, 0]:
result = Series([True, False, True], index=index) & v
expected = Series([False, False, False], index=index)
assert_series_equal(result, expected)
for v in [np.nan]:
self.assertRaises(TypeError, lambda: t & v)
def test_comparison_flex_basic(self):
left = pd.Series(np.random.randn(10))
right = pd.Series(np.random.randn(10))
tm.assert_series_equal(left.eq(right), left == right)
tm.assert_series_equal(left.ne(right), left != right)
tm.assert_series_equal(left.le(right), left <= right)
tm.assert_series_equal(left.lt(right), left < right)
tm.assert_series_equal(left.gt(right), left > right)
tm.assert_series_equal(left.ge(right), left >= right)
# axis
for axis in [0, None, 'index']:
tm.assert_series_equal(left.eq(right, axis=axis), left == right)
tm.assert_series_equal(left.ne(right, axis=axis), left != right)
tm.assert_series_equal(left.le(right, axis=axis), left <= right)
tm.assert_series_equal(left.lt(right, axis=axis), left < right)
tm.assert_series_equal(left.gt(right, axis=axis), left > right)
tm.assert_series_equal(left.ge(right, axis=axis), left >= right)
#
msg = 'No axis named 1 for object type'
for op in ['eq', 'ne', 'le', 'lt', 'gt', 'ge']:
with tm.assertRaisesRegexp(ValueError, msg):
getattr(left, op)(right, axis=1)
def test_comparison_flex_alignment(self):
left = Series([1, 3, 2], index=list('abc'))
right = Series([2, 2, 2], index=list('bcd'))
exp = pd.Series([False, False, True, False], index=list('abcd'))
tm.assert_series_equal(left.eq(right), exp)
exp = pd.Series([True, True, False, True], index=list('abcd'))
tm.assert_series_equal(left.ne(right), exp)
exp = pd.Series([False, False, True, False], index=list('abcd'))
tm.assert_series_equal(left.le(right), exp)
exp = pd.Series([False, False, False, False], index=list('abcd'))
tm.assert_series_equal(left.lt(right), exp)
exp = pd.Series([False, True, True, False], index=list('abcd'))
tm.assert_series_equal(left.ge(right), exp)
exp = pd.Series([False, True, False, False], index=list('abcd'))
tm.assert_series_equal(left.gt(right), exp)
def test_comparison_flex_alignment_fill(self):
left = Series([1, 3, 2], index=list('abc'))
right = Series([2, 2, 2], index=list('bcd'))
exp = pd.Series([False, False, True, True], index=list('abcd'))
tm.assert_series_equal(left.eq(right, fill_value=2), exp)
exp = pd.Series([True, True, False, False], index=list('abcd'))
tm.assert_series_equal(left.ne(right, fill_value=2), exp)
exp = pd.Series([False, False, True, True], index=list('abcd'))
tm.assert_series_equal(left.le(right, fill_value=0), exp)
exp = pd.Series([False, False, False, True], index=list('abcd'))
tm.assert_series_equal(left.lt(right, fill_value=0), exp)
exp = pd.Series([True, True, True, False], index=list('abcd'))
tm.assert_series_equal(left.ge(right, fill_value=0), exp)
exp = pd.Series([True, True, False, False], index=list('abcd'))
tm.assert_series_equal(left.gt(right, fill_value=0), exp)
def test_operators_bitwise(self):
# GH 9016: support bitwise op for integer types
index = list('bca')
s_tft = Series([True, False, True], index=index)
s_fff = Series([False, False, False], index=index)
s_tff = Series([True, False, False], index=index)
s_empty = Series([])
# TODO: unused
# s_0101 = Series([0, 1, 0, 1])
s_0123 = Series(range(4), dtype='int64')
s_3333 = Series([3] * 4)
s_4444 = Series([4] * 4)
res = s_tft & s_empty
expected = s_fff
assert_series_equal(res, expected)
res = s_tft | s_empty
expected = s_tft
assert_series_equal(res, expected)
res = s_0123 & s_3333
expected = Series(range(4), dtype='int64')
assert_series_equal(res, expected)
res = s_0123 | s_4444
expected = Series(range(4, 8), dtype='int64')
assert_series_equal(res, expected)
s_a0b1c0 = Series([1], list('b'))
res = s_tft & s_a0b1c0
expected = s_tff.reindex(list('abc'))
assert_series_equal(res, expected)
res = s_tft | s_a0b1c0
expected = s_tft.reindex(list('abc'))
assert_series_equal(res, expected)
n0 = 0
res = s_tft & n0
expected = s_fff
assert_series_equal(res, expected)
res = s_0123 & n0
expected = Series([0] * 4)
assert_series_equal(res, expected)
n1 = 1
res = s_tft & n1
expected = s_tft
assert_series_equal(res, expected)
res = s_0123 & n1
expected = Series([0, 1, 0, 1])
assert_series_equal(res, expected)
s_1111 = Series([1] * 4, dtype='int8')
res = s_0123 & s_1111
expected = Series([0, 1, 0, 1], dtype='int64')
assert_series_equal(res, expected)
res = s_0123.astype(np.int16) | s_1111.astype(np.int32)
expected = Series([1, 1, 3, 3], dtype='int32')
assert_series_equal(res, expected)
self.assertRaises(TypeError, lambda: s_1111 & 'a')
self.assertRaises(TypeError, lambda: s_1111 & ['a', 'b', 'c', 'd'])
self.assertRaises(TypeError, lambda: s_0123 & np.NaN)
self.assertRaises(TypeError, lambda: s_0123 & 3.14)
self.assertRaises(TypeError, lambda: s_0123 & [0.1, 4, 3.14, 2])
# s_0123 will be all false now because of reindexing like s_tft
if compat.PY3:
# unable to sort incompatible object via .union.
exp = Series([False] * 7, index=['b', 'c', 'a', 0, 1, 2, 3])
with tm.assert_produces_warning(RuntimeWarning):
assert_series_equal(s_tft & s_0123, exp)
else:
exp = Series([False] * 7, index=[0, 1, 2, 3, 'a', 'b', 'c'])
assert_series_equal(s_tft & s_0123, exp)
# s_tft will be all false now because of reindexing like s_0123
if compat.PY3:
# unable to sort incompatible object via .union.
exp = Series([False] * 7, index=[0, 1, 2, 3, 'b', 'c', 'a'])
with tm.assert_produces_warning(RuntimeWarning):
assert_series_equal(s_0123 & s_tft, exp)
else:
exp = Series([False] * 7, index=[0, 1, 2, 3, 'a', 'b', 'c'])
assert_series_equal(s_0123 & s_tft, exp)
assert_series_equal(s_0123 & False, Series([False] * 4))
assert_series_equal(s_0123 ^ False, Series([False, True, True, True]))
assert_series_equal(s_0123 & [False], Series([False] * 4))
assert_series_equal(s_0123 & (False), Series([False] * 4))
assert_series_equal(s_0123 & Series([False, np.NaN, False, False]),
Series([False] * 4))
s_ftft = Series([False, True, False, True])
assert_series_equal(s_0123 & Series([0.1, 4, -3.14, 2]), s_ftft)
s_abNd = Series(['a', 'b', np.NaN, 'd'])
res = s_0123 & s_abNd
expected = s_ftft
assert_series_equal(res, expected)
def test_scalar_na_cmp_corners(self):
s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])
def tester(a, b):
return a & b
self.assertRaises(TypeError, tester, s, datetime(2005, 1, 1))
s = Series([2, 3, 4, 5, 6, 7, 8, 9, datetime(2005, 1, 1)])
s[::2] = np.nan
expected = Series(True, index=s.index)
expected[::2] = False
assert_series_equal(tester(s, list(s)), expected)
d = DataFrame({'A': s})
# TODO: fix this exception (see GH5035)
# (previously this was a TypeError because Series returned
# NotImplemented)
# this is an alignment issue; these are equivalent
# https://github.com/pydata/pandas/issues/5284
self.assertRaises(ValueError, lambda: d.__and__(s, axis='columns'))
self.assertRaises(ValueError, tester, s, d)
# this is wrong as its not a boolean result
# result = d.__and__(s,axis='index')
def test_operators_corner(self):
series = self.ts
empty = Series([], index=Index([]))
result = series + empty
self.assertTrue(np.isnan(result).all())
result = empty + Series([], index=Index([]))
self.assertEqual(len(result), 0)
# TODO: this returned NotImplemented earlier, what to do?
# deltas = Series([timedelta(1)] * 5, index=np.arange(5))
# sub_deltas = deltas[::2]
# deltas5 = deltas * 5
# deltas = deltas + sub_deltas
# float + int
int_ts = self.ts.astype(int)[:-5]
added = self.ts + int_ts
expected = Series(self.ts.values[:-5] + int_ts.values,
index=self.ts.index[:-5], name='ts')
self.assert_series_equal(added[:-5], expected)
def test_operators_reverse_object(self):
# GH 56
arr = Series(np.random.randn(10), index=np.arange(10), dtype=object)
def _check_op(arr, op):
result = op(1., arr)
expected = op(1., arr.astype(float))
assert_series_equal(result.astype(float), expected)
_check_op(arr, operator.add)
_check_op(arr, operator.sub)
_check_op(arr, operator.mul)
_check_op(arr, operator.truediv)
_check_op(arr, operator.floordiv)
def test_arith_ops_df_compat(self):
# GH 1134
s1 = pd.Series([1, 2, 3], index=list('ABC'), name='x')
s2 = pd.Series([2, 2, 2], index=list('ABD'), name='x')
exp = pd.Series([3.0, 4.0, np.nan, np.nan],
index=list('ABCD'), name='x')
tm.assert_series_equal(s1 + s2, exp)
tm.assert_series_equal(s2 + s1, exp)
exp = pd.DataFrame({'x': [3.0, 4.0, np.nan, np.nan]},
index=list('ABCD'))
tm.assert_frame_equal(s1.to_frame() + s2.to_frame(), exp)
tm.assert_frame_equal(s2.to_frame() + s1.to_frame(), exp)
# different length
s3 = pd.Series([1, 2, 3], index=list('ABC'), name='x')
s4 = pd.Series([2, 2, 2, 2], index=list('ABCD'), name='x')
exp = pd.Series([3, 4, 5, np.nan],
index=list('ABCD'), name='x')
tm.assert_series_equal(s3 + s4, exp)
tm.assert_series_equal(s4 + s3, exp)
exp = pd.DataFrame({'x': [3, 4, 5, np.nan]},
index=list('ABCD'))
tm.assert_frame_equal(s3.to_frame() + s4.to_frame(), exp)
tm.assert_frame_equal(s4.to_frame() + s3.to_frame(), exp)
def test_comp_ops_df_compat(self):
# GH 1134
s1 = pd.Series([1, 2, 3], index=list('ABC'), name='x')
s2 = pd.Series([2, 2, 2], index=list('ABD'), name='x')
s3 = pd.Series([1, 2, 3], index=list('ABC'), name='x')
s4 = pd.Series([2, 2, 2, 2], index=list('ABCD'), name='x')
for l, r in [(s1, s2), (s2, s1), (s3, s4), (s4, s3)]:
msg = "Can only compare identically-labeled Series objects"
with tm.assertRaisesRegexp(ValueError, msg):
l == r
with tm.assertRaisesRegexp(ValueError, msg):
l != r
with tm.assertRaisesRegexp(ValueError, msg):
l < r
msg = "Can only compare identically-labeled DataFrame objects"
with tm.assertRaisesRegexp(ValueError, msg):
l.to_frame() == r.to_frame()
with tm.assertRaisesRegexp(ValueError, msg):
l.to_frame() != r.to_frame()
with tm.assertRaisesRegexp(ValueError, msg):
l.to_frame() < r.to_frame()
def test_bool_ops_df_compat(self):
# GH 1134
s1 = pd.Series([True, False, True], index=list('ABC'), name='x')
s2 = pd.Series([True, True, False], index=list('ABD'), name='x')
exp = pd.Series([True, False, False, False],
index=list('ABCD'), name='x')
tm.assert_series_equal(s1 & s2, exp)
tm.assert_series_equal(s2 & s1, exp)
# True | np.nan => True
exp = pd.Series([True, True, True, False],
index=list('ABCD'), name='x')
tm.assert_series_equal(s1 | s2, exp)
# np.nan | True => np.nan, filled with False
exp = pd.Series([True, True, False, False],
index=list('ABCD'), name='x')
tm.assert_series_equal(s2 | s1, exp)
# DataFrame doesn't fill nan with False
exp = pd.DataFrame({'x': [True, False, np.nan, np.nan]},
index=list('ABCD'))
tm.assert_frame_equal(s1.to_frame() & s2.to_frame(), exp)
tm.assert_frame_equal(s2.to_frame() & s1.to_frame(), exp)
exp = pd.DataFrame({'x': [True, True, np.nan, np.nan]},
index=list('ABCD'))
tm.assert_frame_equal(s1.to_frame() | s2.to_frame(), exp)
tm.assert_frame_equal(s2.to_frame() | s1.to_frame(), exp)
# different length
s3 = pd.Series([True, False, True], index=list('ABC'), name='x')
s4 = pd.Series([True, True, True, True], index=list('ABCD'), name='x')
exp = pd.Series([True, False, True, False],
index=list('ABCD'), name='x')
tm.assert_series_equal(s3 & s4, exp)
tm.assert_series_equal(s4 & s3, exp)
# np.nan | True => np.nan, filled with False
exp = pd.Series([True, True, True, False],
index=list('ABCD'), name='x')
tm.assert_series_equal(s3 | s4, exp)
# True | np.nan => True
exp = pd.Series([True, True, True, True],
index=list('ABCD'), name='x')
tm.assert_series_equal(s4 | s3, exp)
exp = pd.DataFrame({'x': [True, False, True, np.nan]},
index=list('ABCD'))
tm.assert_frame_equal(s3.to_frame() & s4.to_frame(), exp)
tm.assert_frame_equal(s4.to_frame() & s3.to_frame(), exp)
exp = pd.DataFrame({'x': [True, True, True, np.nan]},
index=list('ABCD'))
tm.assert_frame_equal(s3.to_frame() | s4.to_frame(), exp)
tm.assert_frame_equal(s4.to_frame() | s3.to_frame(), exp)
def test_series_frame_radd_bug(self):
# GH 353
vals = Series(tm.rands_array(5, 10))
result = 'foo_' + vals
expected = vals.map(lambda x: 'foo_' + x)
assert_series_equal(result, expected)
frame = DataFrame({'vals': vals})
result = 'foo_' + frame
expected = DataFrame({'vals': vals.map(lambda x: 'foo_' + x)})
tm.assert_frame_equal(result, expected)
# really raise this time
with tm.assertRaises(TypeError):
datetime.now() + self.ts
with tm.assertRaises(TypeError):
self.ts + datetime.now()
def test_series_radd_more(self):
data = [[1, 2, 3],
[1.1, 2.2, 3.3],
[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02'),
pd.NaT],
['x', 'y', 1]]
for d in data:
for dtype in [None, object]:
s = Series(d, dtype=dtype)
with tm.assertRaises(TypeError):
'foo_' + s
for dtype in [None, object]:
res = 1 + pd.Series([1, 2, 3], dtype=dtype)
exp = pd.Series([2, 3, 4], dtype=dtype)
tm.assert_series_equal(res, exp)
res = pd.Series([1, 2, 3], dtype=dtype) + 1
tm.assert_series_equal(res, exp)
res = np.nan + pd.Series([1, 2, 3], dtype=dtype)
exp = pd.Series([np.nan, np.nan, np.nan], dtype=dtype)
tm.assert_series_equal(res, exp)
res = pd.Series([1, 2, 3], dtype=dtype) + np.nan
tm.assert_series_equal(res, exp)
s = pd.Series([pd.Timedelta('1 days'), pd.Timedelta('2 days'),
pd.Timedelta('3 days')], dtype=dtype)
exp = pd.Series([pd.Timedelta('4 days'), pd.Timedelta('5 days'),
pd.Timedelta('6 days')])
tm.assert_series_equal(pd.Timedelta('3 days') + s, exp)
tm.assert_series_equal(s + pd.Timedelta('3 days'), exp)
s = pd.Series(['x', np.nan, 'x'])
tm.assert_series_equal('a' + s, pd.Series(['ax', np.nan, 'ax']))
tm.assert_series_equal(s + 'a', pd.Series(['xa', np.nan, 'xa']))
def test_frame_radd_more(self):
data = [[1, 2, 3],
[1.1, 2.2, 3.3],
[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02'),
pd.NaT],
['x', 'y', 1]]
for d in data:
for dtype in [None, object]:
s = DataFrame(d, dtype=dtype)
with tm.assertRaises(TypeError):
'foo_' + s
for dtype in [None, object]:
res = 1 + pd.DataFrame([1, 2, 3], dtype=dtype)
exp = pd.DataFrame([2, 3, 4], dtype=dtype)
tm.assert_frame_equal(res, exp)
res = pd.DataFrame([1, 2, 3], dtype=dtype) + 1
tm.assert_frame_equal(res, exp)
res = np.nan + pd.DataFrame([1, 2, 3], dtype=dtype)
exp = pd.DataFrame([np.nan, np.nan, np.nan], dtype=dtype)
tm.assert_frame_equal(res, exp)
res = pd.DataFrame([1, 2, 3], dtype=dtype) + np.nan
from backlight.trades import trades as module
import pytest
import pandas as pd
@pytest.fixture
def symbol():
return "usdjpy"
@pytest.fixture
def trades(symbol):
data = [1.0, -2.0, 1.0, 2.0, -4.0, 2.0, 1.0, 0.0, 1.0, 0.0]
index = pd.date_range(start="2018-06-06", freq="1min", periods=len(data))
trades = []
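# Split the amount series into consecutive two-period trades.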
for i in range(0, len(data), 2):
trade = pd.Series(index=index[i : i + 2], data=data[i : i + 2], name="amount")
trades.append(trade)
trades = module.make_trades(symbol, trades)
return trades
def test_trades_ids(trades):
expected = [0, 1, 2, 3, 4]
assert trades.ids == expected
def test_trades_amount(trades):
data = [1.0, -2.0, 1.0, 2.0, -4.0, 2.0, 1.0, 0.0, 1.0, 0.0]
index = pd.date_range(start="2018-06-06", freq="1min", periods=len(data))
expected = pd.Series(data=data, index=index, name="amount")
pd.testing.assert_series_equal(trades.amount, expected)
def test_trades_get_any(trades):
data = [1.0, -2.0, -4.0, 2.0]
index = [
pd.Timestamp("2018-06-06 00:00:00"),
pd.Timestamp("2018-06-06 00:01:00"),
pd.Timestamp("2018-06-06 00:04:00"),
pd.Timestamp("2018-06-06 00:05:00"),
]
expected = pd.Series(data=data, index=index, name="amount")
result = trades.get_any(trades.index.minute.isin([0, 4, 5]))
pd.testing.assert_series_equal(result.amount, expected)
def test_trades_get_all(trades):
data = [-4.0, 2.0]
index = [
pd.Timestamp("2018-06-06 00:04:00"),
pd.Timestamp("2018-06-06 00:05:00"),
]
# coding=utf-8
import pandas as pd
from mock import MagicMock
from sparkmagic.livyclientlib.exceptions import BadUserDataException
from nose.tools import assert_raises, assert_equals
from sparkmagic.livyclientlib.command import Command
import sparkmagic.utils.constants as constants
from sparkmagic.livyclientlib.sendpandasdftosparkcommand import (
SendPandasDfToSparkCommand,
)
def test_send_to_scala():
input_variable_name = "input"
input_variable_value = pd.DataFrame({"A": [1], "B": [2]})
output_variable_name = "output"
maxrows = 1
sparkcommand = SendPandasDfToSparkCommand(
input_variable_name, input_variable_value, output_variable_name, maxrows
)
sparkcommand._scala_command = MagicMock(return_value=MagicMock())
sparkcommand.to_command(
constants.SESSION_KIND_SPARK,
input_variable_name,
input_variable_value,
output_variable_name,
)
sparkcommand._scala_command.assert_called_with(
input_variable_name, input_variable_value, output_variable_name
)
def test_send_to_r():
input_variable_name = "input"
input_variable_value = pd.DataFrame({"A": [1], "B": [2]})
output_variable_name = "output"
maxrows = 1
sparkcommand = SendPandasDfToSparkCommand(
input_variable_name, input_variable_value, output_variable_name, maxrows
)
sparkcommand._r_command = MagicMock(return_value=MagicMock())
sparkcommand.to_command(
constants.SESSION_KIND_SPARKR,
input_variable_name,
input_variable_value,
output_variable_name,
)
sparkcommand._r_command.assert_called_with(
input_variable_name, input_variable_value, output_variable_name
)
def test_send_to_python():
input_variable_name = "input"
input_variable_value = pd.DataFrame({"A": [1], "B": [2]})
output_variable_name = "output"
maxrows = 1
sparkcommand = SendPandasDfToSparkCommand(
input_variable_name, input_variable_value, output_variable_name, maxrows
)
sparkcommand._pyspark_command = MagicMock(return_value=MagicMock())
sparkcommand.to_command(
constants.SESSION_KIND_PYSPARK,
input_variable_name,
input_variable_value,
output_variable_name,
)
sparkcommand._pyspark_command.assert_called_with(
input_variable_name, input_variable_value, output_variable_name
)
def test_should_create_a_valid_scala_expression():
input_variable_name = "input"
input_variable_value = pd.DataFrame({"A": [1], "B": [2]})
output_variable_name = "output"
pandas_df_jsonized = """[{"A":1,"B":2}]"""
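# The expected Scala code ships the frame as a JSON-records string and rebuilds it via spark.read.json.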
expected_scala_code = '''
val rdd_json_array = spark.sparkContext.makeRDD("""{}""" :: Nil)
val {} = spark.read.json(rdd_json_array)'''.format(
pandas_df_jsonized, output_variable_name
)
sparkcommand = SendPandasDfToSparkCommand(
input_variable_name, input_variable_value, output_variable_name, 1
)
assert_equals(
sparkcommand._scala_command(
input_variable_name, input_variable_value, output_variable_name
),
Command(expected_scala_code),
)
def test_should_create_a_valid_r_expression():
input_variable_name = "input"
input_variable_value = pd.DataFrame({"A": [1], "B": [2]})
# -*- coding: utf-8 -*-
"""
Input file loading and caching system.
"""
import itertools
import json
import os
import pathlib
import re
import secrets
import sys
from typing import Dict, List, Optional, Tuple, Union
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.interpolate as scpi
import scipy.spatial.transform as scpt
import seaborn as sns
import swifter
from perceptree.common.cache import update_dict_recursively
from perceptree.common.configuration import Config
from perceptree.common.configuration import Configurable
from perceptree.common.graph_saver import GraphSaver
from perceptree.common.logger import Logger
from perceptree.common.logger import ParsingBar
from perceptree.common.logger import LoadingBar
from perceptree.common.math import carthesian_to_spherical
from perceptree.common.math import spherical_to_carthesian
from perceptree.common.util import dict_of_lists
from perceptree.common.util import parse_bool_string
from perceptree.common.util import tuple_array_to_numpy
from perceptree.data.treeio import TreeFile
from perceptree.data.treeio import TreeImage
from perceptree.data.treeio import TreeStatistic
class BaseDataLoader(Logger):
"""
Input file loading and caching system.
"""
@staticmethod
def _create_empty_scores() -> pd.DataFrame:
""" Create empty results dataframe as per _compile_scores return. """
return pd.DataFrame({
"tree_id": int(),
"tree_variant_id": int(),
"tree_jod": int(),
"tree_jod_low": float(),
"tree_jod_high": float(),
"tree_jod_var": float()
}, index=[ ]).set_index([ "tree_id", "tree_variant_id" ])
@staticmethod
def _generate_reduced_scores(full_scores: pd.DataFrame) -> pd.DataFrame:
""" Generate reduced scores from given full scores. """
return full_scores.reset_index() \
.drop([ "tree_variant_id" ], axis=1) \
.set_index([ "tree_id" ])
@staticmethod
def _create_empty_results() -> pd.DataFrame:
""" Create empty results dataframe as per _extract_results return. """
return pd.DataFrame({
"index": int(),
"first_tree_id": int(),
"first_tree_variant_id": int(),
"first_view_id": int(),
"first_view_variant_id": int(),
"second_tree_id": int(),
"second_tree_variant_id": int(),
"second_view_id": int(),
"second_view_variant_id": int(),
"worker_id": str(),
"choice": int()
}, index=[ ]).set_index("index")
@staticmethod
def _generate_reduced_results(full_results: pd.DataFrame) -> pd.DataFrame:
""" Generate reduced results from given full results. """
return full_results.reset_index() \
.drop([ "first_tree_variant_id", "first_view_variant_id", "second_tree_variant_id", "second_view_variant_id" ], axis=1) \
.set_index([ "index" ])
@staticmethod
def _create_empty_view_catalogue() -> pd.DataFrame:
""" Create empty results dataframe as per _view_catalogue return. """
return pd.DataFrame({
"tree_id": int(),
"tree_variant_id": int(),
"view_id": int(),
"view_variant_id": int(),
"view_type": str(),
"path": str(),
"json_path": str(),
"data": object()
}, index=[ ]).set_index(["tree_id", "view_id", "view_variant_id", "view_type"])
@staticmethod
def _generate_reduced_view_catalogue(full_view_catalogue: pd.DataFrame) -> pd.DataFrame:
""" Generate reduced view catalogue from given full view catalogue. """
return full_view_catalogue.reset_index() \
.drop([ "tree_variant_id", "view_variant_id", "json_path" ], axis=1) \
.set_index([ "tree_id", "view_id", "view_type" ])
@staticmethod
def _create_empty_tree_catalogue() -> pd.DataFrame:
""" Create empty results dataframe as per _tree_catalogue return. """
return pd.DataFrame({
"tree_id": int(),
"tree_variant_id": int(),
"path": str(),
"json_path": str(),
"data": object()
}, index=[ ]).set_index(["tree_id", "tree_variant_id"])
@staticmethod
def _generate_reduced_tree_catalogue(full_tree_catalogue: pd.DataFrame) -> pd.DataFrame:
""" Generate reduced tree catalogue from given full tree catalogue. """
return full_tree_catalogue.reset_index() \
.drop([ "tree_variant_id", "json_path" ], axis=1) \
.set_index([ "tree_id" ])
@staticmethod
def _create_empty_tree_data() -> dict:
""" Create empty results dict as per _tree_data return. """
return { }
@staticmethod
def _create_empty_available_features() -> dict:
""" Create empty results dict as per _available_features return. """
return { }
@staticmethod
def _create_empty_dataset_path() -> str:
""" Create empty results str as per _dataset_path return. """
return ""
@staticmethod
def _create_empty_dataset_meta() -> dict:
""" Create empty results dict as per _dataset_meta return. """
return { "unique_id": "EMPTY" }
@staticmethod
def _create_empty_indexed_scores() -> pd.DataFrame:
""" Create empty results dataframe as per _index_scores return. """
return pd.DataFrame({
"tree_id": int(),
"tree_variant_id": int(),
"view_id": int(),
"view_variant_id": int(),
"jod": float(),
"jod_low": float(),
"jod_high": float(),
"jod_var": float()
}, index=[ ]).set_index(["tree_id", "tree_variant_id", "view_id", "view_variant_id"])
@staticmethod
def _generate_reduced_scores_indexed(full_scores_indexed: pd.DataFrame) -> pd.DataFrame:
""" Generate reduced indexed scores from given full indexed scores. """
return full_scores_indexed.reset_index() \
.drop([ "tree_variant_id", "view_variant_id" ], axis=1) \
.set_index([ "tree_id", "view_id" ])
@staticmethod
def _create_empty_spherical_indexed_scores() -> pd.DataFrame:
""" Create empty results dataframe as per _spherical_scores_indexed return. """
return pd.DataFrame({
"tree_id": int(),
"tree_variant_id": int(),
"view_id": int(),
"view_variant_id": int(),
"jod": float(),
"jod_low": float(),
"jod_high": float(),
"jod_var": float()
}, index=[ ]).set_index(["tree_id", "tree_variant_id", "view_id", "view_variant_id"])
def _index_scores(self, scores: pd.DataFrame) -> pd.DataFrame:
"""
Create indexed score data-frame, where -1st view is
for the complete tree.
:param scores: Input scores data-frame.
:return: Returns data-frame indexed by ("tree_id", "view_id"),
where view_id == -1 contains data for the whole tree. Result
contains following columns:
* tree_id, view_id - Integer index for unique tree/view.
* jod, jod_low, jod_high, jod_var - JOD properties.
"""
self.__l.info(f"Indexing {len(scores)} scores...")
if len(scores) <= 0:
self.__l.info(f"Input scores are empty, returning empty frame!")
return BaseDataLoader._create_empty_indexed_scores()
def convert_row(row):
view_count = (len(row) // 4) - 1
return pd.DataFrame(([ {
"tree_id": row["tree_id"],
"tree_variant_id": row["tree_variant_id"],
"view_id": -1,
"view_variant_id": 0,
# TODO - Add support for tree and view variants.
"jod": row["tree_jod"],
"jod_low": row["tree_jod_low"],
"jod_high": row["tree_jod_high"],
"jod_var": row["tree_jod_var"],
} ] if "tree_jod" in row else [ ])
+
([ {
"tree_id": row["tree_id"],
"tree_variant_id": row["tree_variant_id"],
"view_id": view_idx,
"view_variant_id": 0,
# TODO - Add support for tree and view variants.
"jod": row[f"view{view_idx}_jod"],
"jod_low": row[f"view{view_idx}_jod_low"],
"jod_high": row[f"view{view_idx}_jod_high"],
"jod_var": row[f"view{view_idx}_jod_var"],
} for view_idx in range(view_count) ]))
scores_indexed = pd.concat([ convert_row(row) for index, row in scores.reset_index().iterrows() ])
scores_indexed["tree_id"] = scores_indexed["tree_id"].astype("int64")
scores_indexed["tree_variant_id"] = scores_indexed["tree_variant_id"].astype("int64")
scores_indexed.set_index(["tree_id", "tree_variant_id", "view_id", "view_variant_id"], inplace=True)
self.__l.info(f"\tIndexing complete, resulting in {len(scores_indexed)} records.")
return scores_indexed
def _check_view_tree_catalogue(self, base_path: str,
view_catalogue: pd.DataFrame,
tree_catalogue: pd.DataFrame,
scores_indexed: pd.DataFrame) -> bool:
"""
Check whether all necessary view are accounted for and
present in the data-set.
:param base_path: Base path where the data-set exists.
:param view_catalogue: Catalogue containing all information
about the views.
:param tree_catalogue: Catalogue containing all information
about trees.
:param scores_indexed: Indexed scores for trees and views.
:return: Returns True if all necessary views are present.
"""
self.__l.info(f"Checking view catalogue with {len(view_catalogue)} views...")
tree_count = len(view_catalogue.index.unique(level=0))
tree_variant_count = len(view_catalogue.index.unique(level=1))
view_count = len(view_catalogue.index.unique(level=2))
view_variant_count = len(view_catalogue.index.unique(level=3))
view_type_count = len(view_catalogue.index.unique(level=4))
expected_view_count = tree_count * tree_variant_count * view_count * view_variant_count * view_type_count
if len(view_catalogue) != expected_view_count:
self.__l.warning(f"\tView catalogue does not contain all expected "
f"views ({len(view_catalogue)} / {expected_view_count})!")
#return False
# Check views:
if len(view_catalogue) < 1000:
for index, view in view_catalogue.iterrows():
if not os.path.isfile(f"{base_path}/{view.path}"):
self.__l.warning(f"\tView catalogue contains non-existent view "
f"\"{view.path}\"!")
return False
if view.json_path and not os.path.isfile(f"{base_path}/{view.json_path}"):
self.__l.warning(f"\tView catalogue contains non-existent json description "
f"\"{view.json_path}\"!")
return False
else:
self.__l.warning(f"\tSkipping view catalog checking since it has {len(view_catalogue)} items!")
self.__l.info(f"\tView catalogue successfully checked!")
self.__l.info(f"Checking tree catalogue with {len(tree_catalogue)} trees...")
# Check .tree files:
for index, tree in tree_catalogue.iterrows():
if not os.path.isfile(f"{base_path}/{tree.path}"):
self.__l.warning(f"\tView catalogue contains non-existent tree "
f"\"{tree.path}\"!")
return False
if tree.json_path and not os.path.isfile(f"{base_path}/{tree.json_path}"):
self.__l.warning(f"\tView catalogue contains non-existent json description "
f"\"{tree.json_path}\"!")
return False
self.__l.info(f"\tTree catalogue successfully checked!")
return True
def _prepare_spherical_knots(self, variant_jsons: dict,
tree_scores: pd.DataFrame) -> (dict, np.array):
""" Prepare tree view knot points for spherical interpolation. """
base_view_json = variant_jsons[(0, 0)]
base_height = base_view_json["state"]["camera"]["height"]["base"]
base_distance = base_view_json["state"]["camera"]["distance"]["base"]
origin_pos = np.array([ 0.0, 0.0, 0.0 ])
bottom_pos = np.array([ 0.0, -base_distance, 0.0 ])
top_pos = np.array([ 0.0, base_distance, 0.0 ])
base_pos = np.array([ base_distance, base_height, 0.0 ])
scores = tree_scores.set_index("view_id")
knot_dict = {
view_id: {
"score": scores.loc[view_id].jod,
"pos": scpt.Rotation.from_euler(
"XYZ", variant_json["tree"]["rotation"],
degrees=False
).apply(variant_json["camera"]["pos"])
}
for (view_id, variant_id), variant_json in variant_jsons.items()
if variant_id == 0
}
knot_dict[-3] = {
"score": scores.loc[-1].jod,
"pos": origin_pos
}
knot_dict[-2] = {
"score": scores.loc[-1].jod,
"pos": bottom_pos
}
knot_dict[-1] = {
"score": scores.loc[-1].jod,
"pos": top_pos
}
knots = np.array([
[spherical[1], spherical[2], score["score"]]
for view_id, score in knot_dict.items()
for spherical in [carthesian_to_spherical(score["pos"])]
])
return knot_dict, knots
def _prepare_spherical_lut(self, knots: np.array,
method: str) -> (object, dict):
""" Calculate spherical interpolation look-up table for given knots. """
if method == "rbf":
lut = scpi.Rbf(knots[:, 0], knots[:, 1], knots[:, 2], function="multiquadric")
lut_kws = { }
if method == "wrap_rbf":
def great_circle_distance(u: np.array, v: np.array) -> float:
""" Calculate great circle distance. """
u_lats, v_lats = u[1], v[1]
u_lons, v_lons = u[0], v[0]
delta_lons = np.abs(v_lons - u_lons)
return np.arctan2(
np.sqrt(
(np.cos(v_lats) * np.sin(delta_lons)) ** 2.0 +
(np.cos(u_lats) * np.sin(v_lats) - np.sin(u_lats) * np.cos(v_lats) * np.cos(delta_lons)) ** 2.0
),
np.sin(u_lats) * np.sin(v_lats) + np.cos(u_lats) * np.cos(v_lats) * np.cos(delta_lons)
)
def wrap_around_norm(u: np.array, v: np.array) -> np.array:
return great_circle_distance(u, v)
lut = scpi.Rbf(knots[:, 0], knots[:, 1], knots[:, 2], function="gaussian", norm=wrap_around_norm)
lut_kws = { }
elif method == "smooth":
lut = scpi.SmoothSphereBivariateSpline(knots[:, 0], knots[:, 1], knots[:, 2], s=32.0)
lut_kws = { "grid": False }
elif method == "rect":
orig_resolution = (
np.where(knots[1:, 0] == knots[0, 0])[0][0] + 1,
np.where((knots[1:, 1] - knots[:-1, 1]) > 0.0)[0][0] + 1
)
fit_knots = [
knots[:orig_resolution[0], 0],
knots[::orig_resolution[1], 1],
knots[:, 2].reshape((orig_resolution[0], orig_resolution[1]))
]
fit_knots[0][0] += 0.0001
fit_knots[1][-1] -= 0.0001
lut = scpi.RectSphereBivariateSpline(fit_knots[0], fit_knots[1], fit_knots[2],
pole_continuity=False)
lut_kws = { "grid": False }
elif method == "lsq":
orig_resolution = (
np.where(knots[1:, 0] == knots[0, 0])[0][0] + 1,
np.where((knots[1:, 1] - knots[:-1, 1]) > 0.0)[0][0] + 1
)
fit_knots = [
knots[:orig_resolution[0], 0],
knots[::orig_resolution[1], 1],
knots[:, 2].reshape((orig_resolution[0], orig_resolution[1]))
]
fit_knots[0][0] += 0.0001
fit_knots[0][-1] -= 0.0001
fit_knots[1][0] += 0.0001
fit_knots[1][-1] -= 0.0001
lut = scpi.LSQSphereBivariateSpline(knots[:, 0], knots[:, 1], knots[:, 2], fit_knots[0],
fit_knots[1])
lut_kws = { "grid": False }
return lut, lut_kws
def _prepare_spherical_smooth_grid(self, knots: np.array,
lut: object, lut_kws: dict,
resolution: tuple, visualize: bool = False,
visualize_knots: bool = False) -> (np.array, np.array):
""" Calculate smooth grid using provided knot points and a look-up table. """
smooth_grid = np.meshgrid(np.linspace(0.0, np.pi, resolution[0]),
np.linspace(0.0, 2.0 * np.pi, resolution[1]))
smooth_data = np.reshape(lut(smooth_grid[0].ravel(), smooth_grid[1].ravel(), **lut_kws), resolution)
if visualize:
fig = plt.figure(figsize=(8, 4))
ax = fig.add_subplot()
vmin, vmax = np.min(smooth_data), np.max(smooth_data)
cmap = plt.get_cmap("viridis")
ax.imshow(smooth_data, origin="lower", extent=(0.0, np.pi, 0.0, 2.0 * np.pi),
interpolation="nearest", cmap=cmap, vmin=vmin, vmax=vmax)
if visualize_knots:
ax.scatter(knots[:, 0], knots[:, 1], c=knots[:, 2], s=45,
cmap=cmap, vmin=vmin, vmax=vmax)
ax.scatter(knots[:, 0], knots[:, 1], c="red", s=5)
ax.set_xlabel("Phi")
ax.set_ylabel("Theta")
plt.show()
return smooth_data, smooth_grid
def _prepare_spherical_map(self, knots: np.array,
resolutions: List[tuple],
methods: List[str],
visualize_map: bool = False,
visualize_views: bool = False,
visualize_view_count: int = 10) -> (object, dict):
""" Create final look-up table for spherical coordinate views, mapping to scores. """
if len(resolutions) == 0 or len(resolutions) != len(methods):
return None, { }
current_knots = knots
for idx, (resolution, method) in enumerate(zip(resolutions, methods)):
is_first = idx == 0
is_last = idx == len(resolutions) - 1
lut, lut_kws = self._prepare_spherical_lut(
knots=current_knots, method=method
)
if not is_last:
smooth_data, smooth_grid = self._prepare_spherical_smooth_grid(
knots=current_knots, lut=lut, lut_kws=lut_kws,
resolution=resolution,
visualize=visualize_map,
visualize_knots=is_first
)
current_knots = np.array([
[ phi, theta, score ]
for phi, theta, score in
zip(smooth_grid[0].ravel(), smooth_grid[1].ravel(), smooth_data.ravel())
])
if visualize_views:
smooth_data, smooth_grid = self._prepare_spherical_smooth_grid(
knots=current_knots, lut=lut, lut_kws=lut_kws,
resolution=resolutions[-1],
visualize=visualize_map,
visualize_knots=False
)
points = np.array([
spherical_to_carthesian([ 1.0, phi, theta ])
for phi, theta, score in
zip(smooth_grid[0].ravel(), smooth_grid[1].ravel(), smooth_data.ravel())
])
colors = np.array([
score
for phi, theta, score in
zip(smooth_grid[0].ravel(), smooth_grid[1].ravel(), smooth_data.ravel())
])
colors = (colors - np.min(colors)) / (np.max(colors) - np.min(colors))
cmap = plt.get_cmap("viridis")
fig = plt.figure(figsize=(4 * visualize_view_count, 4))
for idx, rotation in enumerate(np.linspace(0.0, 360.0, visualize_view_count + 1)[:-1]):
ax = fig.add_subplot(1, visualize_view_count, idx + 1, projection="3d")
ax.plot_surface(points[:, 0].reshape(smooth_data.shape),
points[:, 1].reshape(smooth_data.shape),
points[:, 2].reshape(smooth_data.shape),
rstride=1, cstride=1,
facecolors=cmap(colors.reshape(smooth_data.shape)))
ax.set_axis_off()
ax.view_init(0, rotation)
plt.show()
return lut, lut_kws
def _prepare_spherical_scores(self, variant_jsons: dict,
tree_scores: pd.DataFrame,
lut: object, lut_kws: dict) -> dict:
""" Calculate interpolated variant scores using look-up table. """
scores = tree_scores.set_index("view_id")
spherical_scores = {
(view_id, variant_id): {
"car_pos": view_pos,
"sph_pos": sph_pos,
"base_score": scores.loc[view_id].jod,
"score": lut(sph_pos[0], sph_pos[1], **lut_kws)
}
for (view_id, variant_id), variant_json in variant_jsons.items()
for view_pos in [
scpt.Rotation.from_euler(
"XYZ", variant_json["tree"]["rotation"],
degrees=False
).apply(variant_json["camera"]["pos"])
]
for sph_pos in [ carthesian_to_spherical(view_pos) ]
}
# Add spherical score for the complete tree.
spherical_scores[( -1, 0 )] = {
"car_pos": np.array([ 0.0, 0.0, 0.0 ]),
"sph_pos": carthesian_to_spherical(np.array([ 0.0, 0.0, 0.0 ])),
"base_score": scores.loc[-1].jod,
"score": scores.loc[-1].jod,
}
return spherical_scores
def _prepare_spherical_indexed_scores(self, base_path: str,
view_catalogue: pd.DataFrame,
tree_catalogue: pd.DataFrame,
scores_indexed: pd.DataFrame) -> pd.DataFrame:
"""
Augment indexed score data-frame with spherical interpolation
for each view/tree variant.
:param base_path: Base path where the data-set exists.
:param view_catalogue: Catalogue containing all information
about the views.
:param tree_catalogue: Catalogue containing all information
about trees.
:param scores_indexed: Indexed scores for trees and views.
:return: Returns data-frame indexed by ("tree_id", "view_id",
"view_variant_id"), where view_id == -1 contains data for
the whole tree. Result contains following columns:
* tree_id, view_id, view_variant_id - Integer index for
unique tree/view and the specific view variant.
* jod, jod_low, jod_high, jod_var - JOD properties.
"""
if len(scores_indexed) <= 0:
self.__l.info(f"Input scores are empty, returning empty spherical scores frame!")
return BaseDataLoader._create_empty_spherical_indexed_scores()
all_views = view_catalogue.reset_index()
all_scores = scores_indexed.reset_index()
tree_ids = scores_indexed.index.unique(level=0)
# TODO - Support tree variants.
tree_variant = 0
spherical_indexed_scores = [ ]
self.__l.info(f"Preparing spherical indexed scores for {len(tree_catalogue)} trees...")
loading_progress = LoadingBar("", max=len(tree_catalogue))
for tree_id in tree_ids:
# Calculate interpolations for each tree.
tree_views = all_views[
(all_views.tree_id == tree_id) &
(all_views.tree_variant_id == tree_variant)
]
tree_scores = all_scores[
(all_scores.tree_id == tree_id) &
(all_scores.tree_variant_id == tree_variant)
]
# Prepare variants and load descriptions.
variant_jsons = { }
variants = set()
for idx, row in tree_views.iterrows():
variants.add(( row.tree_id, row.tree_variant_id, row.view_id, row.view_variant_id ))
if (row.view_id, row.view_variant_id) in variant_jsons or row.json_path == "":
continue
with open(f"{base_path}/{row.json_path}", "r") as jf:
variant_jsons[(row.view_id, row.view_variant_id)] = json.load(jf)
# Add variant for the complete tree.
variants.add(( tree_id, 0, -1, 0 ))
if len(variant_jsons) == 0:
# No variants or missing json descriptions -> Use existing scores.
for variant in variants:
scores = scores_indexed.loc[(variant[0], variant[1], variant[2], variant[3])]
spherical_indexed_scores.append((
variant[0], variant[1], variant[2], variant[3],
# Use the same JOD for variant as the base.
scores.jod,
scores.jod, scores.jod_low, scores.jod_high, scores.jod_var
))
continue
# Sanity check, we should always have at least view, variant with ID (0, 0).
assert((0, 0) in variant_jsons)
# Calculate spherical interpolation map.
knot_dict, knots = self._prepare_spherical_knots(
variant_jsons=variant_jsons, tree_scores=tree_scores
)
lut, lut_kws = self._prepare_spherical_map(
# TODO - Parameterize this by script arguments.
knots=knots,
resolutions=[ (36, 36), (72, 72) ],
methods=[ "wrap_rbf", "rbf" ],
visualize_map=False, visualize_views=False,
visualize_view_count=10
)
# Interpolate variant scores using the spherical map.
spherical_scores = self._prepare_spherical_scores(
variant_jsons=variant_jsons,
tree_scores=tree_scores,
lut=lut, lut_kws=lut_kws
)
# Save results.
for variant in variants:
scores = scores_indexed.loc[(variant[0], variant[1], variant[2], 0)]
new_scores = spherical_scores[(variant[2], variant[3])]
assert(scores.jod == new_scores["base_score"])
spherical_indexed_scores.append((
variant[0], variant[1], variant[2], variant[3],
# Use the new interpolated JOD score.
new_scores["score"],
scores.jod, scores.jod_low, scores.jod_high, scores.jod_var
))
loading_progress.next(1)
loading_progress.finish()
spherical_scores_indexed = pd.DataFrame(
data=spherical_indexed_scores,
columns=(
"tree_id", "tree_variant_id", "view_id", "view_variant_id",
"jod", "base_jod", "jod_low", "jod_high", "jod_var"
)
)
spherical_scores_indexed.set_index(["tree_id", "tree_variant_id", "view_id", "view_variant_id"], inplace=True)
spherical_scores_indexed.sort_index(inplace=True)
self.__l.info(f"\tDone, prepared {len(spherical_indexed_scores)} spherical scores.")
return spherical_scores_indexed
def _load_tree_data(self, base_path: str = "",
tree_catalogue: pd.DataFrame = pd.DataFrame(),
load_node_data: bool = True,
allow_preloaded: bool = False) -> Dict[Tuple[int, int], TreeFile]:
""" Load all tree data files from given catalogue. """
self.__l.info(f"Loading tree data from {len(tree_catalogue)} .tree files...")
tree_data = { }
parsing_progress = ParsingBar("", max=len(tree_catalogue))
for index, tree in tree_catalogue.iterrows():
if allow_preloaded and tree.data is not None:
tree_data[index] = tree.data
else:
tree_data[index] = TreeFile(
file_path=f"{base_path}/{tree.path}",
load_node=load_node_data,
) if tree.path else None
parsing_progress.next(1)
parsing_progress.finish()
self.__l.info(f"\tDone, loaded {len(tree_data)} tree files.")
return tree_data
def _determine_available_features(self,
view_catalogue: pd.DataFrame,
tree_data: Dict[Tuple[int, int], TreeFile],
load_node_data: bool) -> dict:
""" Create a hierarchy of feature names available for use. """
self.__l.info(f"Determining available features...")
available_features = {
"stat": np.unique([
name
for tree in tree_data.values()
if tree is not None and "stats" in tree.dynamic_meta_data
for name, item in tree.dynamic_meta_data["stats"].items()
if TreeStatistic.is_stat_dict(item)
]),
"image": np.unique([
name
for tree in tree_data.values()
if tree is not None and "stats" in tree.dynamic_meta_data and "visual" in tree.dynamic_meta_data["stats"]
for name, item in tree.dynamic_meta_data["stats"]["visual"].items()
if TreeImage.is_image_dict(item)
]),
"other": dict_of_lists([
v.split(".") for v in np.unique([
f"{name}.{element}"
for tree in tree_data.values()
if tree is not None and "stats" in tree.dynamic_meta_data
for name, item in tree.dynamic_meta_data["stats"].items()
if not TreeStatistic.is_stat_dict(item) and name != "visual"
for element in item.keys()
])
]),
"view": view_catalogue.reset_index().view_type.unique(),
# TODO - Detect available skeleton features?
"skeleton": [ "segment", "position", "thickness" ] if load_node_data else [ ]
}
totals = { name: len(features) for name, features in available_features.items() }
self.__l.info(f"\tDone, found { totals } available features.")
return available_features
def _load_empty(self):
""" Load empty data definitions. """
self._full_results = self._create_empty_results()
self._results = self._generate_reduced_results(
full_results=self._full_results
)
self._users = self._create_empty_users()
self._full_scores = self._create_empty_scores()
self._scores = self._generate_reduced_scores(
full_scores=self._full_scores
)
self._full_scores_indexed = self._create_empty_indexed_scores()
self._scores_indexed = self._generate_reduced_scores_indexed(
full_scores_indexed=self._full_scores_indexed
)
self._full_view_catalogue = self._create_empty_view_catalogue()
self._view_catalogue = self._generate_reduced_view_catalogue(
full_view_catalogue=self._full_view_catalogue
)
self._full_tree_catalogue = self._create_empty_tree_catalogue()
self._tree_catalogue = self._generate_reduced_tree_catalogue(
full_tree_catalogue=self._full_tree_catalogue
)
self._spherical_scores_indexed = self._create_empty_spherical_indexed_scores()
self._tree_data = self._create_empty_tree_data()
self._available_features = self._create_empty_available_features()
self._view_base_path = self._create_empty_dataset_path()
self._dataset_meta = self._create_empty_dataset_meta()
def _load_as_dataset(self, dataset_path: str, use_dithered: bool,
use_augment: bool, use_augment_variants: Optional[int]):
""" Load data as a pre-exported data-set. """
results_path = f"{dataset_path}/results.csv"
if not os.path.isfile(results_path):
raise RuntimeError(f"Dataset at \"{dataset_path}\" does not contain results.csv!")
users_path = f"{dataset_path}/users.csv"
if not os.path.isfile(users_path):
raise RuntimeError(f"Dataset at \"{dataset_path}\" does not contain users.csv!")
scores_path = f"{dataset_path}/scores.csv"
if not os.path.isfile(scores_path):
raise RuntimeError(f"Dataset at \"{dataset_path}\" does not contain scores.csv!")
scores_indexed_path = f"{dataset_path}/scores_indexed.csv"
if not os.path.isfile(scores_indexed_path):
self.__l.warning(f"Dataset at \"{dataset_path}\" does not contain scores_indexed.csv, using a dummy!")
dummy_df = BaseDataLoader._create_empty_indexed_scores()
dummy_df.to_csv(scores_indexed_path, sep=";", index=True)
spherical_scores_indexed_path = f"{dataset_path}/spherical_scores_indexed.csv"
if not os.path.isfile(spherical_scores_indexed_path):
self.__l.warning(f"Dataset at \"{dataset_path}\" does not contain spherical_scores_indexed.csv, using a dummy!")
dummy_df = BaseDataLoader._create_empty_spherical_indexed_scores()
dummy_df.to_csv(spherical_scores_indexed_path, sep=";", index=True)
view_catalogue_path = f"{dataset_path}/view_catalogue.csv"
if not os.path.isfile(view_catalogue_path):
raise RuntimeError(f"Dataset at \"{dataset_path}\" does not contain view_catalogue.csv!")
tree_catalogue_path = f"{dataset_path}/tree_catalogue.csv"
if not os.path.isfile(tree_catalogue_path):
raise RuntimeError(f"Dataset at \"{dataset_path}\" does not contain tree_catalogue.csv!")
dataset_meta_path = f"{dataset_path}/dataset_meta.json"
if not os.path.isfile(dataset_meta_path):
raise RuntimeError(f"Dataset at \"{dataset_path}\" does not contain dataset_meta.json!")
results = pd.read_csv(results_path, sep=";")
if "first_view_variant_id" not in results:
# Old-style dataset -> Add new columns:
results["first_tree_variant_id"] = 0
results["first_view_variant_id"] = 0
results["second_tree_variant_id"] = 0
results["second_view_variant_id"] = 0
users = pd.read_csv(users_path, sep=";")
users.set_index(["worker_id"], inplace=True)
scores = pd.read_csv(scores_path, sep=";")
if "tree_variant_id" not in scores:
# Old-style dataset -> Add new columns:
scores["tree_variant_id"] = 0
scores["tree_id"] = scores["tree_id"].astype(int)
scores.set_index(["tree_id", "tree_variant_id"])
view_catalogue = | pd.read_csv(view_catalogue_path, sep=";") | pandas.read_csv |
"""
Overview
--------
This module implements the Multiple Imputation through Chained
Equations (MICE) approach to handling missing data in statistical data
analyses. The approach has the following steps:
0. Impute each missing value with the mean of the observed values of
the same variable.
1. For each variable in the data set with missing values (termed the
'focus variable'), do the following:
1a. Fit an 'imputation model', which is a regression model for the
focus variable, regressed on the observed and (current) imputed values
of some or all of the other variables.
1b. Impute the missing values for the focus variable. Currently this
imputation must use the 'predictive mean matching' (pmm) procedure.
2. Once all variables have been imputed, fit the 'analysis model' to
the data set.
3. Repeat steps 1-2 multiple times and combine the results using a
'combining rule' to produce point estimates of all parameters in the
analysis model and standard errors for them.
The imputations for each variable are based on an imputation model
that is specified via a model class and a formula for the regression
relationship. The default model is OLS, with a formula specifying
main effects for all other variables.
The MICE procedure can be used in one of two ways:
* If the goal is only to produce imputed data sets, the MICEData class
can be used to wrap a data frame, providing facilities for doing the
imputation. Summary plots are available for assessing the performance
of the imputation.
* If the imputed data sets are to be used to fit an additional
'analysis model', a MICE instance can be used. After specifying the
MICE instance and running it, the results are combined using the
`combine` method. Results and various summary plots are then
available.
Terminology
-----------
The primary goal of the analysis is usually to fit and perform
inference using an 'analysis model'. If an analysis model is not
specified, then imputed datasets are produced for later use.
The MICE procedure involves a family of imputation models. There is
one imputation model for each variable with missing values. An
imputation model may be conditioned on all or a subset of the
remaining variables, using main effects, transformations,
interactions, etc. as desired.
A 'perturbation method' is a method for setting the parameter estimate
in an imputation model. The 'gaussian' perturbation method first fits
the model (usually using maximum likelihood, but it could use any
statsmodels fit procedure), then sets the parameter vector equal to a
draw from the Gaussian approximation to the sampling distribution for
the fit. The 'bootstrap' perturbation method sets the parameter
vector equal to a fitted parameter vector obtained when fitting the
conditional model to a bootstrapped version of the data set.
Class structure
---------------
There are two main classes in the module:
* 'MICEData' wraps a Pandas dataframe, incorporating information about
the imputation model for each variable with missing values. It can
be used to produce multiply imputed data sets that are to be further
processed or distributed to other researchers. A number of plotting
procedures are provided to visualize the imputation results and
missing data patterns. The `history_func` hook allows any features
of interest of the imputed data sets to be saved for further
analysis.
* 'MICE' takes both a 'MICEData' object and an analysis model
specification. It runs the multiple imputation, fits the analysis
models, and combines the results to produce a `MICEResults` object.
The summary method of this results object can be used to see the key
estimands and inferential quantities.
Notes
-----
By default, to conserve memory 'MICEData' saves very little
information from one iteration to the next. The data set passed by
the user is copied on entry, but then is over-written each time new
imputations are produced. If using 'MICE', the fitted
analysis models and results are saved. MICEData includes a
`history_callback` hook that allows arbitrary information from the
intermediate datasets to be saved for future use.
References
----------
<NAME>: 'Multiple Imputation: A Primer', Stat Methods Med Res,
1999.
TE Raghunathan et al.: 'A Multivariate Technique for Multiply
Imputing Missing Values Using a Sequence of Regression Models', Survey
Methodology, 2001.
SAS Institute: 'Predictive Mean Matching Method for Monotone Missing
Data', SAS 9.2 User's Guide, 2014.
<NAME> al.: 'Multiple Imputation with Diagnostics (mi) in R:
Opening Windows into the Black Box', Journal of Statistical Software,
2009.
"""
import pandas as pd
import numpy as np
import patsy
from statsmodels.base.model import LikelihoodModelResults
from statsmodels.regression.linear_model import OLS
from collections import defaultdict
_mice_data_example_1 = """
>>> imp = mice.MICEData(data)
>>> imp.set_imputer('x1', formula='x2 + np.square(x2) + x3')
>>> for j in range(20):
... imp.update_all()
... imp.data.to_csv('data%02d.csv' % j)"""
_mice_data_example_2 = """
>>> imp = mice.MICEData(data)
>>> j = 0
>>> for data in imp:
... imp.data.to_csv('data%02d.csv' % j)
... j += 1"""
class PatsyFormula(object):
"""
A simple wrapper for a string to be interpreted as a Patsy formula.
"""
def __init__(self, formula):
self.formula = "0 + " + formula
class MICEData(object):
__doc__ = """\
Wrap a data set to allow missing data handling with MICE.
Parameters
----------
data : Pandas data frame
The data set, which is copied internally.
perturbation_method : string
The default perturbation method
k_pmm : int
The number of nearest neighbors to use during predictive mean
matching. Can also be specified in `fit`.
history_callback : function
A function that is called after each complete imputation
cycle. The return value is appended to `history`. The
MICEData object is passed as the sole argument to
`history_callback`.
Examples
--------
Draw 20 imputations from a data set called `data` and save them in
separate files with filename pattern `dataXX.csv`. The variables
other than `x1` are imputed using linear models fit with OLS, with
mean structures containing main effects of all other variables in
`data`. The variable named `x1` has a conditional mean structure
that includes an additional term for x2^2.
%(_mice_data_example_1)s
Impute using default models, using the MICEData object as an
iterator.
%(_mice_data_example_2)s
Notes
-----
Allowed perturbation methods are 'gaussian' (the model parameters
are set to a draw from the Gaussian approximation to the posterior
distribution), and 'boot' (the model parameters are set to the
estimated values obtained when fitting a bootstrapped version of
the data set).
`history_callback` can be implemented to have side effects such as
saving the current imputed data set to disk.
""" % {'_mice_data_example_1': _mice_data_example_1,
'_mice_data_example_2': _mice_data_example_2}
def __init__(self, data, perturbation_method='gaussian',
k_pmm=20, history_callback=None):
if data.columns.dtype != np.dtype('O'):
msg = "MICEData data column names should be string type"
raise ValueError(msg)
self.regularized = dict()
# Drop observations where all variables are missing. This
# also has the effect of copying the data frame.
self.data = data.dropna(how='all').reset_index(drop=True)
self.history_callback = history_callback
self.history = []
self.predict_kwds = {}
# Assign the same perturbation method for all variables.
# Can be overridden when calling 'set_imputer'.
self.perturbation_method = defaultdict(lambda:
perturbation_method)
# Map from variable name to indices of observed/missing
# values.
self.ix_obs = {}
self.ix_miss = {}
for col in self.data.columns:
ix_obs, ix_miss = self._split_indices(self.data[col])
self.ix_obs[col] = ix_obs
self.ix_miss[col] = ix_miss
# Most recent model instance and results instance for each variable.
self.models = {}
self.results = {}
# Map from variable names to the conditional formula.
self.conditional_formula = {}
# Map from variable names to init/fit args of the conditional
# models.
self.init_kwds = defaultdict(lambda: dict())
self.fit_kwds = defaultdict(lambda: dict())
# Map from variable names to the model class.
self.model_class = {}
# Map from variable names to most recent params update.
self.params = {}
# Set default imputers.
for vname in data.columns:
self.set_imputer(vname)
# The order in which variables are imputed in each cycle.
# Impute variables with the fewest missing values first.
vnames = list(data.columns)
nmiss = [len(self.ix_miss[v]) for v in vnames]
nmiss = np.asarray(nmiss)
ii = np.argsort(nmiss)
ii = ii[sum(nmiss == 0):]
self._cycle_order = [vnames[i] for i in ii]
self._initial_imputation()
self.k_pmm = k_pmm
def next_sample(self):
"""
Returns the next imputed dataset in the imputation process.
Returns
-------
data : array_like
An imputed dataset from the MICE chain.
Notes
-----
`MICEData` does not have a `skip` parameter. Consecutive
values returned by `next_sample` are immediately consecutive
in the imputation chain.
The returned value is a reference to the data attribute of
the class and should be copied before making any changes.
"""
self.update_all(1)
return self.data
def _initial_imputation(self):
"""
Use a PMM-like procedure for initial imputed values.
For each variable, missing values are imputed as the observed
value that is closest to the mean over all observed values.
"""
for col in self.data.columns:
di = self.data[col] - self.data[col].mean()
di = np.abs(di)
ix = di.idxmin()
imp = self.data[col].loc[ix]
self.data[col].fillna(imp, inplace=True)
def _split_indices(self, vec):
null = | pd.isnull(vec) | pandas.isnull |
import os
import sys
import datetime
import numpy as np
import scipy.signal
import pandas as pd
import yfinance as yf
from contextlib import contextmanager
from src.utils_date import add_days
from src.utils_date import prev_weekday
#from pandas_datareader.nasdaq_trader import get_nasdaq_symbols
ERROR_NO_MINUTE_DATA_YTD = 'Skip: Missing minute-level data for yesterday'
ERROR_NO_MINUTE_DATA_TDY = 'Skip: Missing minute-level data for today'
ERROR_CANDLES_PER_DAY = 'Skip: Insufficient candles today ({} less than {})'
ERROR_NULL_COL = 'Skip: NULL value in df_i columns ({})'
ERROR_NULL_DAY_LEVEL_IND = 'Skip: NULL value in day-level indicators'
ERROR_PRICES_D_NOT_UPDATE = 'Error: prices_d not updated, latest date found: {}'
@contextmanager
def suppress_stdout():
'''Decorator to supress function output to sys.stdout'''
with open(os.devnull, "w") as devnull:
old_stdout = sys.stdout
sys.stdout = devnull
try:
yield
finally:
sys.stdout = old_stdout
def get_ls_sym():
'''Returns list of tickers from nasdaqtrader.com
Duplicates and strings with length > 5 are removed
Returns:
ls_sym (List of str)
'''
#df_symbols = get_nasdaq_symbols()
#ls_sym = df_symbols.index.to_list()
ls_urls = [
'http://ftp.nasdaqtrader.com/dynamic/SymDir/nasdaqlisted.txt'
,'http://ftp.nasdaqtrader.com/dynamic/SymDir/otherlisted.txt'
]
ls_sym = []
for i, url in enumerate(ls_urls):
df = pd.read_csv(url, sep='|')
for col in list(df):
if col in ['ACT Symbol', 'Symbol']: df['sym'] = df[col]
ls_sym+=df[df['sym'].str.len()<=5]['sym'].to_list()
ls_sym = list(set(ls_sym)) # remove duplicates
return ls_sym
def get_df_prices(sym, start_str, end_str):
'''Return dataframe with minute-level stock price data
from start date to end date (inclusive).
Args:
sym (str): Ticker symbol e.g. 'BYND'
start_str (str): Start date string e.g. '2020-07-18'
end_str (str): End date string e.g. '2020-07-18'
Returns:
df (pandas.Dataframe)
'''
assert start_str <= end_str
end_str_mod=add_days(end_str, 3)
with suppress_stdout():
df = yf.download(sym,
start=start_str,
end=end_str_mod,
interval='1m',
progress=0,
prepost=True).reset_index()
is_date_range = ((df['Datetime'].dt.date.astype('str')>=start_str)
&(df['Datetime'].dt.date.astype('str')<=end_str))
df = df[is_date_range]
df['Datetime'] = df['Datetime'].dt.tz_localize(None) #remove timezone
is_reg_hours = ((df['Datetime'].dt.time.astype('str')>='09:30:00')
&(df['Datetime'].dt.time.astype('str')<='15:59:00'))
df['is_reg_hours'] = np.where(is_reg_hours, 1, 0)
df['sym'] = sym
df = df.rename(columns={
'Datetime':'datetime',
'Open':'open',
'High':'high',
'Low':'low',
'Adj Close':'adj_close',
'Volume':'volume'
})
ls_col = [
'sym',
'datetime',
'open',
'high',
'low',
'adj_close',
'volume',
'is_reg_hours',
]
return df[ls_col]
def add_rsi(df, rsi_period):
'''Returns dataframe with additional columns:
rsi (float)
Args:
df (pandas.DataFrame): Must be index sorted by datetime:
adj_close (float)
rsi_period (int): Number of rsi periods
Returns:
df (pandas.DataFrame)
'''
chg = df['adj_close'].diff(1)
gain = chg.mask(chg<0,0)
loss = chg.mask(chg>0,0)
avg_gain = gain.ewm(com=rsi_period-1, min_periods=rsi_period).mean()
avg_loss = loss.ewm(com=rsi_period-1, min_periods=rsi_period).mean()
rs = abs(avg_gain/avg_loss)
rsi = 100 - (100/(1+rs))
df['rsi14'] = rsi
return df
def add_vwap(df):
'''Returns dataframe with additional columns:
vwap (float): Volume Weighted Average Price
vwap_var (float): % variance of close from vwap
Args:
df (pandas.DataFrame): Dataframe with at least columns:
datetime
open
high
low
adj_close
volume
Returns:
df (pandas.DataFrame)
'''
df['vwap'] = (df['volume']*(df['high']+df['low']+df['adj_close'])/3).cumsum()/df['volume'].cumsum()
df['vwap'] = df['vwap'].fillna(df['adj_close'])
df['vwap_var'] = (df['adj_close']/df['vwap'])-1
return df
def get_df_i(sym, date_str, live_data, db, num_candles_min = 200):
'''Returns interim dataframe with price data and
trading indicators for input symbol and date
Args:
sym (str)
date_str (str)
live_data (int)
db (Database object)
num_candles_min (int)
Returns:
df_i (pandas.Dataframe)
'''
start_str = prev_weekday(date_str) #start 1 day early to get prev day data for rsi etc
end_str = add_days(date_str, 3) #extend end date string due to bug
if live_data:
with suppress_stdout():
df = yf.download(sym,
start=start_str,
end=end_str,
interval='1m',
prepost = False,
progress=0).reset_index()
df['Datetime'] = df['Datetime'].dt.tz_localize(None) #remove timezone
df = df.rename(columns={'Adj Close':'adj_close',
'Datetime':'datetime',
'Open':'open',
'High':'high',
'Low':'low',
'Volume':'volume'})
else:
q = '''
SELECT *
FROM prices_m
WHERE is_reg_hours = 1
AND sym='{}'
AND DATE(datetime)>='{}'
AND DATE(datetime)<='{}'
ORDER BY datetime
'''.format(sym, start_str, date_str)
df = pd.read_sql(q, db.conn)
df['datetime'] = pd.to_datetime(df['datetime'])
df['date_str'] = df['datetime'].dt.date.astype('str')
if df[df['date_str']==start_str].empty:
raise Exception(ERROR_NO_MINUTE_DATA_YTD)
if df[df['date_str']==date_str].empty:
raise Exception(ERROR_NO_MINUTE_DATA_TDY)
num_candles_today = df[df['date_str']==date_str].shape[0]
if num_candles_today<num_candles_min and not live_data:
raise Exception(ERROR_CANDLES_PER_DAY.format(num_candles_today, num_candles_min))
df = df[df['date_str']<=date_str]
df = df[df['date_str']>=start_str]
df['sma9'] = df['adj_close'].rolling(9).mean()
df['sma90'] = df['adj_close'].rolling(90).mean()
df['sma180'] = df['adj_close'].rolling(180).mean()
df['sma180'] = df['sma180'].fillna(df['sma90'])
df['sma9_var'] = (df['adj_close']/df['sma9'])-1
df['sma180_var'] = (df['adj_close']/df['sma180'])-1
df = add_rsi(df, 14)
df['spread']=((df['adj_close']/df['open'])-1).abs()
df['spread14_e']=df['spread'].ewm(span=14).mean()
df['volume14'] = df['volume'].rolling(14).mean()
df['volume34'] = df['volume'].rolling(34).mean()
df['volume14_34_var'] = (df['volume14']/df['volume34'])-1
df['volume14_34_var'] = df['volume14_34_var'].fillna(0.0)
prev_close = df[df['date_str']==start_str]['adj_close'].to_list()[-1]
prev_floor = df[df['date_str']==start_str]['adj_close'].min()
prev_ceil = df[df['date_str']==start_str]['adj_close'].max()
df['prev_close'] = prev_close
df['prev_close_var'] = df['adj_close']/prev_close - 1
df['prev_floor_var'] = (df['adj_close']/prev_floor)-1
df['prev_ceil_var'] = (df['adj_close']/prev_ceil)-1
df['candle_score'] = df['adj_close']/df['open']-1
df['prev1_candle_score'] = df['candle_score'].shift(1)
df['prev2_candle_score'] = df['candle_score'].shift(2)
df['prev3_candle_score'] = df['candle_score'].shift(3)
df = df[df['date_str']==date_str]
df = add_vwap(df)
df = df.rename(columns={'adj_close':'close'})
ls_col = [
'datetime',
'close',
'sma9',
'sma180',
'rsi14',
'vwap',
'sma9_var',
'sma180_var',
'vwap_var',
'spread14_e',
'volume14_34_var',
'prev_close',
'prev_close_var',
'prev_floor_var',
'prev_ceil_var',
'prev1_candle_score',
'prev2_candle_score',
'prev3_candle_score',
]
df = df[ls_col]
ls_col_na = df.columns[df.isna().any()].tolist()
if ls_col_na:
raise Exception(ERROR_NULL_COL.format(ls_col_na))
return df.reset_index(drop=1)
def add_peaks_valleys(df, order=5):
'''Returns Dataframe with additional columns:
peak_valley - 1 if peak, -1 if valley, 0 o.w.
Args:
df (pandas.DataFrame): Dataframe with at least columns:
datetime
close
order (int): How many points on each side to use for the comparison to consider
Returns:
df (pandas.DataFrame)
'''
df['peak_valley'] = 0
col_peak_valley = list(df).index('peak_valley')
peak_indexes = scipy.signal.argrelextrema(np.array(df['close']), np.greater, order = order)[0]
valley_indexes = scipy.signal.argrelextrema(np.array(df['close']), np.less, order = order)[0]
df.iloc[peak_indexes, col_peak_valley] = 1
df.iloc[valley_indexes, col_peak_valley] = -1
return df
def add_valley_variances(df):
'''Returns Dataframe with additional columns:
valley_close_pct_chg (float): % change in close of current and previous valley e.g. 1% -> 0.01
valley_rsi_diff (float): Change in rsi of current and previous valley
valley_interval_mins (float): Minutes since last valley
Args:
df (pandas.DataFrame): Dataframe with at least columns:
datetime
close
rsi14
peak_valley
Returns:
df (pandas.DataFrame)
'''
df['valley_close'] = np.where(df['peak_valley']==-1, df['close'], np.nan)
df['valley_rsi'] = np.where(df['peak_valley']==-1, df['rsi14'], np.nan)
df['valley_datetime'] = pd.to_datetime(np.where(df['peak_valley']==-1, df['datetime'], pd.NaT))
df['valley_close'] = df['valley_close'].ffill()
df['valley_rsi'] = df['valley_rsi'].ffill()
df['valley_datetime'] = df['valley_datetime'].ffill()
df['valley_close_pct_chg'] = df['valley_close'].pct_change()
df['valley_rsi_diff'] = df['valley_rsi'].diff()
df['valley_interval_mins'] = df['valley_datetime'].diff().astype('timedelta64[m]')
df = df.drop(columns=['valley_close'
,'valley_rsi'
,'valley_datetime'])
return df
def add_divergences(df, close_buffer=0, rsi_buffer=0):
'''Returns Dataframe with additional columns:
divergence (str):
'bull_reg' - Regular bullish divergence i.e. Lower price valleys, but rise in RSI
'bull_hid' - Hidden bullish divergence i.e. Higher price valleys, but drop in RSI
'' - No divergence
Args:
df (pandas.DataFrame): Dataframe with at least columns:
datetime
valley_close_pct_chg
valley_rsi_diff
close_buffer (float): Price change must be at least this % change to count as divergence, e.g 1.5 -> 1.5%
rsi_buffer (float): RSI change must be at least this change to count as divergence
Returns:
df (pandas.DataFrame)
'''
df['divergence'] = ''
df['divergence'] = np.where((df['valley_close_pct_chg'] < -(close_buffer/100))
&(df['valley_rsi_diff'] > rsi_buffer)
,'bull_reg'
,df['divergence'])
df['divergence'] = np.where((df['valley_close_pct_chg'] > (close_buffer/100))
&(df['valley_rsi_diff'] < -rsi_buffer)
,'bull_hid'
,df['divergence'])
return df
def add_additional_measures(df, sym):
'''Add last few features to Dataframe
Args:
df (pandas.Dataframe)
Returns:
df (pandas.Dataframe)
'''
df['mins_from_start'] = (df['datetime']-df['datetime'].min()).astype('timedelta64[m]')
df['valley_close_score'] = df['valley_close_pct_chg'].abs()*100
df['valley_rsi_score'] = df['valley_rsi_diff'].abs()
df['day_open_var'] = df['close']/df['close'].to_list()[0] - 1
df['open_from_prev_close_var'] = df['close'].to_list()[0]/df['prev_close'] - 1
df['ceil'] = df['close'].cummax()
df['ceil_var'] = df['close']/df['ceil'] - 1
df['floor'] = df['close'].cummin()
df['floor_var'] = df['close']/df['floor'] - 1
df['sym'] = sym
#df['hour_of_day'] = (df['datetime'] - pd.Timedelta(minutes=29)).dt.hour
#df['weekday'] = df['datetime'].dt.weekday.astype('category') #monday is 0
return df
def add_is_profit(df, target_profit, target_loss):
'''Returns Dataframe with additional columns, calculated based on input profit/loss parameters:
actual_buy_price (float)
profit (float)
is_profit (bool)
Args:
df (pandas.DataFrame): Sorted Dataframe with at least these columns:
close (float)
divergence (str)
target_profit (float): Target percentage profit e.g. 0.01 -> 1%
target_loss (float): Target percentage loss e.g. 0.01 -> 1%
Returns:
df (pandas.DataFrame)
'''
buy_delay = 2 #only buy after n mins
df['actual_buy_price'] = df['close'].shift(-buy_delay)
df['profit'] = None
for idx_div_row in df.index[df['divergence']!='']:
actual_buy_price = df.iloc[idx_div_row, df.columns.get_loc('actual_buy_price')]
profit = 0
for selling_price in df.iloc[idx_div_row:-buy_delay, df.columns.get_loc('actual_buy_price')]:
profit = (selling_price/actual_buy_price)-1
if profit>target_profit or profit<target_loss:
break
df.at[idx_div_row, 'profit'] = profit
df['is_profit'] = df['profit']>=target_profit
df['profit'] = df['profit'].astype('float')
return df
def get_dt_day_indicators(sym, close_latest, date_str_tdy, db):
'''Returns dictionary with day level indicators for input symbol
Args:
sym (str)
close_latest (float)
Returns:
dt_day_indicators (Dictionary)
'''
q='''
with t as (
select date, adj_close
from prices_d
where sym = '{}'
and date(date) < '{}'
order by date desc
limit 185
)
select adj_close
from t
order by date
'''.format(sym.upper(), date_str_tdy)
df = pd.read_sql(q, db.conn)
df = df.append(pd.DataFrame({'adj_close':[close_latest]}))
df['sma9'] = df['adj_close'].rolling(9).mean()
df['sma90'] = df['adj_close'].rolling(90).mean()
df['sma180'] = df['adj_close'].rolling(180).mean()
df['sma180'] = df['sma180'].fillna(df['sma90'])
df['sma9_var'] = (df['adj_close']/df['sma9'])-1
df['sma180_var'] = (df['adj_close']/df['sma180'])-1
df = add_rsi(df, 14)
ls_col = [
'sma9_var',
'sma180_var',
'rsi14',
]
if df[ls_col].iloc[-1].isnull().any():
raise Exception(ERROR_NULL_DAY_LEVEL_IND)
dt_day_indicators = dict(df[ls_col].iloc[-1])
return dt_day_indicators
def add_day_level_indicators(df_i, sym, db):
'''Returns df_interim with day-level indicators added
Args:
df_i (pandas.DataFrame)
sym (str)
Returns:
df_i (pandas.DataFrame)
'''
close_latest = df_i['close'].to_list()[0]
date_str_tdy = df_i['datetime'].to_list()[-1].strftime('%Y-%m-%d')
dt_day_indicators = get_dt_day_indicators(sym, close_latest, date_str_tdy, db)
for col, value in dt_day_indicators.items():
df_i[f'day_{col}'] = value
return df_i
def get_df_c(sym, date_str, live_data, db, target_profit, target_loss, index_limit=1000):
'''Returns df_cooked
Args:
sym (str)
date_str (str)
live_data (int)
db (DataBase object)
target_profit (float): Target percentage profit e.g. 0.01 -> 1%
target_loss (float): Target percentage loss e.g. -0.01 -> -1%
Returns:
df_c (pd.DataFrame)
'''
assert target_profit>0 and target_loss<0
df_i = get_df_i(sym, date_str, live_data, db)
df_i = df_i.iloc[:index_limit]
df_i = add_peaks_valleys(df_i, order=5)
df_i = add_valley_variances(df_i)
df_i = add_divergences(df_i)
df_i = add_day_level_indicators(df_i, sym, db)
df_c = add_additional_measures(df_i, sym)
df_c = add_is_profit(df_c, target_profit, target_loss)
return df_c
def get_curr_price(sym):
'''Returns current price for input symbol
Args:
sym (str)
Returns:
curr_price (float)
'''
df = yf.download(sym, period='1d', interval='1m', progress=0).reset_index()
curr_price = df['Adj Close'].to_list()[-1]
return curr_price
def get_df_info(sym):
'''Returns dataframe containing general info about input symbol
Args:
sym (str): e.g. BYND
Returns:
df_info (pandas.DataFrame)
sym (str)
long_name (str)
sec (str)
ind (str)
quote_type (str)
fund_family (str)
summary (str)
timestamp (datetime)
'''
dt_info = yf.Ticker(sym).info
dt_info['timestamp'] = datetime.datetime.now()
dt_info['sector'] = dt_info.get('sector')
dt_col = {
'symbol':'sym',
'longName':'long_name',
'sector':'sec',
'industry':'ind',
'quoteType':'quote_type',
'fundFamily':'fund_family',
'longBusinessSummary':'summary',
'timestamp':'timestamp',
}
dt_info = {key:dt_info.get(key) for key in dt_col}
df_info = | pd.DataFrame([dt_info]) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Tue May 14 11:03:53 2019
@author: razin.hussain
"""
import pandas as pd
import numpy as np
import urllib.request, urllib.parse, urllib.error
from bs4 import BeautifulSoup
import pymysql
from sqlalchemy import create_engine
import xml.etree.ElementTree as ET
import datetime
import time
from functools import reduce
conn = pymysql.connect(host = [host_name], user = [user_name], passwd = [password])
cur = conn.cursor()
cur.execute("use [database_name]")
#cur.execute("drop table if exists symbol")
#cur.execute("create table symbol (id INTEGER PRIMARY KEY AUTO_INCREMENT UNIQUE, ticker VARCHAR (64), security VARCHAR(255), sector VARCHAR (64), sub_sector VARCHAR(255), headquarter VARCHAR (255), CIK VARCHAR(255))")
engine = create_engine("mysql+pymysql://[user]:[password]@[host_name]:[port]/[database_name]")
def sp500_cik():
    # The function grabs stock IDs, ticker symbols and CIKs from the database
    # and returns them as three parallel lists
df = pd.read_sql_table("symbol", con = engine)
cik_lst = []
ticker_lst = []
id_lst = []
for cik in df["CIK"]:
cik_lst.append(cik)
for ticker in df["ticker"]:
ticker_lst.append(ticker)
for id in df["id"]:
id_lst.append(id)
return cik_lst, ticker_lst, id_lst
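# A minimal usage sketch for sp500_cik (assumes the `symbol` table has already been
# populated; the printed values below are purely illustrative):
# cik_lst, ticker_lst, id_lst = sp500_cik()
# for stock_id, ticker, cik in zip(id_lst, ticker_lst, cik_lst):
#     print(stock_id, ticker, cik)    # e.g. 1 'ABC' '0000123456'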
def first_scraper(urlname1):
    # The function takes a url as a parameter (example url:
    # https://www.sec.gov/cgi-bin/browse-edgar?action=getcompany&CIK=0000764180&type=10-K&dateb=&owner=exclude&count=40),
    # extracts the urls of 10-Ks and 10-Qs from the filing index page,
    # converts them into absolute urls and appends them to a list
filings = []
url_lst = []
html1 = urllib.request.urlopen(urlname1).read()
soup1 = BeautifulSoup(html1, 'html.parser')
for tr1 in soup1.find_all("tr")[2:]:
try:
tds1 = tr1.find_all("td")
document_type = tds1[0].text.rstrip()
if document_type == "10-Q" or document_type == "10-K":
filings.append(document_type)
hrefs1 = tds1[1].findChildren()
hrefs1 = hrefs1[0].get("href")
base_url1 = "https://www.sec.gov"
final_url1 = base_url1 + hrefs1
url_lst.append(final_url1)
except:
continue
return filings, url_lst
#x = first_scraper("https://www.sec.gov/cgi-bin/browse-edgar?action=getcompany&CIK=0000764180&type=10-K&dateb=&owner=exclude&count=40")
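# A hedged sketch of how the two returned lists line up, reusing the author's example
# URL above; the printed output is illustrative only:
# filings, url_lst = first_scraper("https://www.sec.gov/cgi-bin/browse-edgar?action=getcompany&CIK=0000764180&type=10-K&dateb=&owner=exclude&count=40")
# for filing_type, filing_url in zip(filings, url_lst):
#     print(filing_type, filing_url)    # e.g. 10-K https://www.sec.gov/Archives/edgar/data/...-index.htm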
def second_scraper(urlname2):
    # The function takes a url as a parameter (example url:
    # https://www.sec.gov/Archives/edgar/data/55785/000005578519000032/0000055785-19-000032-index.htm),
    # extracts the xbrl instance link, report date and report period from the page and returns them as strings
html2 = urllib.request.urlopen(urlname2).read()
soup2 = BeautifulSoup(html2, 'html.parser')
try:
table = soup2.find("table", {"class": "tableFile", "summary": "Data Files"})
rows = table.find_all("tr")
except:
return 0
for row in rows:
tds = row.find_all("td")
if len(tds) > 3:
if "INS" in tds[3].text or "XML" in tds[3].text:
xbrl_link = tds[2].a["href"]
xbrl_link = "https://www.sec.gov" + xbrl_link
# if len(tds) > 3:
# if "DEF" in tds[3].text:
# desc_link = tds[2].a["href"]
# desc_link = "https://www.sec.gov" + desc_link
div = soup2.find("div", {"class":"formContent"})
div1 = div.find_all("div", {"class":"formGrouping"})[0]
report_date = div1.find_all("div", {"class":"info"})[0].text.rstrip()
div2 = div.find_all("div", {"class":"formGrouping"})[1]
report_period = div2.find("div", {"class":"info"}).text.rstrip()
return xbrl_link, report_date, report_period
# x = second_scraper("https://www.sec.gov/Archives/edgar/data/1551152/000155115219000008/0001551152-19-000008-index.htm")
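# second_scraper returns a 3-tuple (or 0 when the filing has no XBRL data files table),
# so callers unpack it by position; a sketch with illustrative values, where
# filing_index_url is a placeholder name:
# result = second_scraper(filing_index_url)
# if result != 0:
#     xbrl_link, report_date, report_period = result    # e.g. (".../xyz-20190630.xml", "2019-08-01", "2019-06-30")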
def xbrl_scraper(urlname2, report_type):
    # The heart of the program. The function xbrl_scraper takes the filing index url
    # and the type of report as arguments, both of which have already been scraped using the previous two functions.
    # It initiates a master dictionary that includes three other dictionaries (branch dictionaries) -
    # an IS dictionary, a BS dictionary and a CFS dictionary. The function then searches for income statement,
    # balance sheet and cash flow statement items, in that order, for the specific quarter/year the
    # document represents and updates the respective dictionaries once the items are found.
    # When a branch dictionary is completed, the master dictionary is updated with the branch.
    # Calls the second_scraper function - see description above
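    # A rough sketch of the shape master_dct is meant to take once the IS/BS/CFS
    # dictionaries are filled in; key names below are hypothetical - the real keys
    # are whatever us-gaap concepts are found in the instance document:
    # master_dct = {
    #     "IS": {"Revenues": 1000000.0, "NetIncomeLoss": 150000.0},
    #     "BS": {"Assets": 5000000.0, "Liabilities": 3000000.0},
    #     "CFS": {"NetCashProvidedByUsedInOperatingActivities": 250000.0},
    # }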
file_page = second_scraper(urlname2)
try:
xbrl_link = file_page[0]
except:
print ("XBRL format does not exist")
return 0
    # Opens the xml file, parses it and creates a root object that allows access to the root element of the xml file
xml = urllib.request.urlopen(xbrl_link)
tree = ET.parse(xml)
root = tree.getroot()
master_dct = {}
    # dei namespace URIs, one per taxonomy year; these prefixes are generic to all companies
dei_lst = ["{http://xbrl.sec.gov/dei/2009-01-31}", "{http://xbrl.sec.gov/dei/2010-01-31}", "{http://xbrl.sec.gov/dei/2011-01-31}", "{http://xbrl.sec.gov/dei/2012-01-31}", "{http://xbrl.sec.gov/dei/2013-01-31}", "{http://xbrl.sec.gov/dei/2014-01-31}", "{http://xbrl.sec.gov/dei/2015-01-31}", "{http://xbrl.sec.gov/dei/2016-01-31}", "{http://xbrl.sec.gov/dei/2017-01-31}", "{http://xbrl.sec.gov/dei/2018-01-31}", "{http://xbrl.sec.gov/dei/2019-01-31}"]
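    # The dei namespace prefix changes with every annual taxonomy release, so each
    # prefix is tried until root.find() resolves; the matching namespace yields the
    # fiscal period focus (e.g. Q2), fiscal year, period end date and common shares
    # outstanding for this filing.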
for dei in dei_lst:
try:
period_code = "DocumentFiscalPeriodFocus"
period_code = dei + period_code
period = root.find(period_code).text
year_code = "DocumentFiscalYearFocus"
year_code = dei + year_code
year = root.find(year_code).text
period_code = "DocumentPeriodEndDate"
period_code = dei + period_code
report_period = root.find(period_code).text
print (report_period)
quarter = period + year
print (quarter)
shares_code = "EntityCommonStockSharesOutstanding"
shares_code = dei + shares_code
shares_os = root.find(shares_code).text
print ("Common shares O/S:", shares_os)
except:
continue
for context in root.iter("{http://www.xbrl.org/2003/instance}context"):
try:
entity = context.find("{http://www.xbrl.org/2003/instance}entity")
cik = entity.find("{http://www.xbrl.org/2003/instance}identifier").text
print ("CIK:", cik)
break
except:
continue
is_dct = {}
    # Find the specific context id attribute that contains the date code for the current quarter
# INCOME STATEMENT Scraper
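    # Heuristic used below: a context whose start/end duration is under 120 days marks
    # the current quarter (10-Q), while a duration over 280 days marks the full fiscal
    # year (10-K); the id of the context ending on the report period is stored as
    # date_code for the current reporting period.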
for context in root.iter("{http://www.xbrl.org/2003/instance}context"):
if "Q" in context.get("id") and "D" in context.get("id") and "_" not in context.get("id") and "YTD" not in context.get("id"):
# print ("First if worked")
period = context.find("{http://www.xbrl.org/2003/instance}period")
try:
start_date = period.find("{http://www.xbrl.org/2003/instance}startDate").text
start_date_dt = datetime.datetime.strptime(start_date, "%Y-%m-%d")
end_date = period.find("{http://www.xbrl.org/2003/instance}endDate").text
end_date_dt = datetime.datetime.strptime(end_date, "%Y-%m-%d")
delta = end_date_dt - start_date_dt
days = delta.days
print ("No. of days:", days)
if report_type == "10-Q":
if period.find("{http://www.xbrl.org/2003/instance}endDate").text == report_period and days < 120 and days != 0:
# print ("Second if worked")
date_code = context.get("id")
print ("IS 1st method:", date_code)
elif report_type == "10-K":
if period.find("{http://www.xbrl.org/2003/instance}endDate").text == report_period and days > 280:
# print ("Second if worked")
date_code = context.get("id")
print ("IS 1st method:", date_code)
except:
continue
elif "Y" in context.get("id") and "D" in context.get("id") and "_" not in context.get("id"):
# print ("First if worked")
period = context.find("{http://www.xbrl.org/2003/instance}period")
try:
start_date = period.find("{http://www.xbrl.org/2003/instance}startDate").text
start_date_dt = datetime.datetime.strptime(start_date, "%Y-%m-%d")
end_date = period.find("{http://www.xbrl.org/2003/instance}endDate").text
end_date_dt = datetime.datetime.strptime(end_date, "%Y-%m-%d")
delta = end_date_dt - start_date_dt
days = delta.days
print ("No. of days:", days)
if report_type == "10-Q":
if period.find("{http://www.xbrl.org/2003/instance}endDate").text == report_period and days < 120 and days != 0:
# print ("Second if worked")
date_code = context.get("id")
print ("IS 2nd method:", date_code)
elif report_type == "10-K":
if period.find("{http://www.xbrl.org/2003/instance}endDate").text == report_period and days > 280:
# print ("Second if worked")
date_code = context.get("id")
print ("IS 2nd method:", date_code)
except:
continue
elif "D" in context.get("id") and report_period[0:4] in context.get("id") and "_" not in context.get("id"):
# print ("First if worked")
period = context.find("{http://www.xbrl.org/2003/instance}period")
try:
start_date = period.find("{http://www.xbrl.org/2003/instance}startDate").text
start_date_dt = datetime.datetime.strptime(start_date, "%Y-%m-%d")
end_date = period.find("{http://www.xbrl.org/2003/instance}endDate").text
end_date_dt = datetime.datetime.strptime(end_date, "%Y-%m-%d")
delta = end_date_dt - start_date_dt
days = delta.days
print ("No. of days:", days)
if report_type == "10-Q":
if period.find("{http://www.xbrl.org/2003/instance}endDate").text == report_period and days < 120 and days != 0:
# print ("Second if worked")
date_code = context.get("id")
print ("IS 3rd method:", date_code)
elif report_type == "10-K":
if period.find("{http://www.xbrl.org/2003/instance}endDate").text == report_period and days > 280:
# print ("Second if worked")
date_code = context.get("id")
print ("IS 3rd method:", date_code)
except:
continue
elif cik in context.get("id") and "".join(report_period.split("-")) in context.get("id") and "us-gaap" not in context.get("id"):
# print ("First if worked")
period = context.find("{http://www.xbrl.org/2003/instance}period")
try:
start_date = period.find("{http://www.xbrl.org/2003/instance}startDate").text
start_date_dt = datetime.datetime.strptime(start_date, "%Y-%m-%d")
end_date = period.find("{http://www.xbrl.org/2003/instance}endDate").text
end_date_dt = datetime.datetime.strptime(end_date, "%Y-%m-%d")
delta = end_date_dt - start_date_dt
days = delta.days
print ("No. of days:", days)
if report_type == "10-Q":
if period.find("{http://www.xbrl.org/2003/instance}endDate").text == report_period and days < 120 and days != 0:
# print ("Second if worked")
date_code = context.get("id")
print ("IS 4th method:", date_code)
elif report_type == "10-K":
if period.find("{http://www.xbrl.org/2003/instance}endDate").text == report_period and days > 280:
# print ("Second if worked")
date_code = context.get("id")
print ("IS 4th method:", date_code)
except:
continue
elif "Duration" in context.get("id") and "us-gaap" not in context.get("id") and "dei" not in context.get("id") and "srt" not in context.get("id"):
period = context.find("{http://www.xbrl.org/2003/instance}period")
try:
start_date = period.find("{http://www.xbrl.org/2003/instance}startDate").text
start_date_dt = datetime.datetime.strptime(start_date, "%Y-%m-%d")
end_date = period.find("{http://www.xbrl.org/2003/instance}endDate").text
end_date_dt = datetime.datetime.strptime(end_date, "%Y-%m-%d")
delta = end_date_dt - start_date_dt
days = delta.days
print ("No. of days:", days)
if report_type == "10-Q":
if period.find("{http://www.xbrl.org/2003/instance}endDate").text == report_period and days < 120 and days != 0:
# print ("Second if worked")
date_code = context.get("id")
print ("IS 5th method:", date_code)
elif report_type == "10-K":
if period.find("{http://www.xbrl.org/2003/instance}endDate").text == report_period and days > 280:
# print ("Second if worked")
date_code = context.get("id")
print ("IS 5th method:", date_code)
except:
continue
elif "FROM" in context.get("id") and "TO" in context.get("id"):
period = context.find("{http://www.xbrl.org/2003/instance}period")
try:
start_date = period.find("{http://www.xbrl.org/2003/instance}startDate").text
start_date_dt = datetime.datetime.strptime(start_date, "%Y-%m-%d")
end_date = period.find("{http://www.xbrl.org/2003/instance}endDate").text
end_date_dt = datetime.datetime.strptime(end_date, "%Y-%m-%d")
delta = end_date_dt - start_date_dt
days = delta.days
print ("No. of days:", days)
if report_type == "10-Q":
if period.find("{http://www.xbrl.org/2003/instance}endDate").text == report_period and days < 120 and days != 0:
# print ("Second if worked")
date_code = context.get("id")
print ("IS 6th method:", date_code)
elif report_type == "10-K":
if period.find("{http://www.xbrl.org/2003/instance}endDate").text == report_period and days > 280:
# print ("Second if worked")
date_code = context.get("id")
print ("IS 6th method:", date_code)
except:
continue
elif "eol" in context.get("id") and "".join(report_period.split("-")) in context.get("id") and context.get("id").endswith("0") and "x" not in context.get("id"):
# print ("First if worked")
period = context.find("{http://www.xbrl.org/2003/instance}period")
try:
start_date = period.find("{http://www.xbrl.org/2003/instance}startDate").text
start_date_dt = datetime.datetime.strptime(start_date, "%Y-%m-%d")
end_date = period.find("{http://www.xbrl.org/2003/instance}endDate").text
end_date_dt = datetime.datetime.strptime(end_date, "%Y-%m-%d")
delta = end_date_dt - start_date_dt
days = delta.days
print ("No. of days:", days)
if report_type == "10-Q":
if period.find("{http://www.xbrl.org/2003/instance}endDate").text == report_period and days < 120 and days != 0:
# print ("Second if worked")
date_code = context.get("id")
print ("IS 7th method:", date_code)
elif report_type == "10-K":
if period.find("{http://www.xbrl.org/2003/instance}endDate").text == report_period and days > 280:
# print ("Second if worked")
date_code = context.get("id")
print ("IS 7th method:", date_code)
except:
continue
# else:
# period = context.find("{http://www.xbrl.org/2003/instance}period")
# try:
# start_date = period.find("{http://www.xbrl.org/2003/instance}startDate").text
# start_date_dt = datetime.datetime.strptime(start_date, "%Y-%m-%d")
# end_date = period.find("{http://www.xbrl.org/2003/instance}endDate").text
# end_date_dt = datetime.datetime.strptime(end_date, "%Y-%m-%d")
# delta = end_date_dt - start_date_dt
# days = delta.days
# print ("No. of days:", days)
#
# if report_type == "10-Q":
# if period.find("{http://www.xbrl.org/2003/instance}endDate").text == report_period and days < 120 and days != 0:
# date_code = context.get("id")
# print ("IS last method:", date_code)
# elif report_type == "10-K":
# if period.find("{http://www.xbrl.org/2003/instance}endDate").text == report_period and days > 280:
# date_code = context.get("id")
# print ("IS last method:", date_code)
# except:
# continue
    # Different variations of the income statement items are tested to ensure items are not missed during scraping
v_lst = ["{http://xbrl.us/us-gaap/2009-01-31}", "{http://xbrl.us/us-gaap/2010-01-31}", "{http://fasb.org/us-gaap/2011-01-31}", "{http://fasb.org/us-gaap/2012-01-31}", "{http://fasb.org/us-gaap/2013-01-31}", "{http://fasb.org/us-gaap/2014-01-31}", "{http://fasb.org/us-gaap/2015-01-31}", "{http://fasb.org/us-gaap/2016-01-31}", "{http://fasb.org/us-gaap/2017-01-31}", "{http://fasb.org/us-gaap/2018-01-31}", "{http://fasb.org/us-gaap/2019-01-31}"]
sales_lst = ["TotalRevenuesNetOfInterestExpense", "OilAndGasRevenue", "ExplorationAndProductionRevenue", "RegulatedAndUnregulatedOperatingRevenue", "AssetManagementFees1", "RealEstateRevenueNet", "SalesRevenueGoodsNet", "SalesRevenueServicesGross", "SalesRevenueServicesNet", "SalesRevenueNet", "Revenues", "RevenueFromContractWithCustomerExcludingAssessedTax", "RevenueFromContractWithCustomerIncludingAssessedTax"]
cos_lst = ["ContractRevenueCost", "CostOfGoodsSoldExcludingDepreciationDepletionAndAmortization", "CostOfGoodsSold", "CostOfServices", "CostOfGoodsAndServicesSold", "CostOfRevenue"]
sga_lst = ["SellingGeneralAndAdministrativeExpense"]
ga_lst = ["GeneralAndAdministrativeExpense"]
marketing_lst = ["SellingAndMarketingExpense", "AdvertisingExpense"]
salary_lst = ["LaborAndRelatedExpense"]
rd_lst = ["CommunicationsAndInformationTechnology", "ResearchAndDevelopmentExpenseExcludingAcquiredInProcessCost", "ResearchAndDevelopmentExpense"]
# distrib_lst = ["DistributionExpenses"]
# rental_lst = ["DirectCostsOfLeasedAndRentedPropertyOrEquipment"]
# provloss_lst = ["PolicyholderBenefitsAndClaimsIncurredNet", "ProvisionForLoanLeaseAndOtherLosses"]
restructure_lst = ["RestructuringCharges"]
impairexp_lst = ["AmortizationOfIntangibleAssets", "ImpairmentOfRealEstate", "DeferredPolicyAcquisitionCostsAndPresentValueOfFutureProfitsAmortization1","GoodwillAndIntangibleAssetImpairment", "GoodwillImpairmentLoss", "RestructuringSettlementAndImpairmentProvisions"]
extdebt_lst = ["GainsLossesOnExtinguishmentOfDebt"]
litigexp_lst = ["LitigationSettlementExpense"]
opexp_lst = ["OperatingExpenses"]
totexp_lst = ["BenefitsLossesAndExpenses", "NoninterestExpense", "CostsAndExpenses"]
oi_lst = ["OperatingIncomeLoss"]
intexp_lst = ["InterestExpenseBorrowings", "InterestExpense", "InterestExpenseNet", "InterestIncomeExpenseNet"]
nonopexp_lst = ["OtherNonoperatingIncomeExpense", "OtherNoninterestExpense"]
pretaxinc_lst = ["IncomeLossFromOperationsBeforeIncomeTaxExpenseBenefit", "IncomeLossFromContinuingOperationsBeforeIncomeTaxesExtraordinaryItemsNoncontrollingInterest", "IncomeLossFromContinuingOperationsBeforeIncomeTaxesMinorityInterestAndIncomeLossFromEquityMethodInvestments", "IncomeLossFromContinuingOperationsIncludingPortionAttributableToNoncontrollingInterest"]
tax_lst = ["IncomeTaxExpenseBenefit"]
ni_lst = ["ProfitLoss", "NetIncomeLoss"]
eps_lst = ["EarningsPerShareBasic", "EarningsPerShareBasicAndDiluted", "EarningsPerShareDiluted"]
sharesdiluted_lst = ["CommonStockSharesOutstanding", "WeightedAverageNumberOfShareOutstandingBasicAndDiluted", "WeightedAverageNumberOfSharesOutstandingBasic", "WeightedAverageNumberOfDilutedSharesOutstanding"]
div_lst = ["CommonStockDividendsPerShareDeclared", "CommonStockDividendsPerShareCashPaid"]
    # Extracting income statement items from the xml file and adding them to the income statement dictionary
    # Looping through every taxonomy version and every tag variation to access the required elements
for version in v_lst:
for sales_term in sales_lst:
sales_elem = version + sales_term
try:
for sales in root.iter(sales_elem):
if sales.get("contextRef") == date_code:
is_dct.update({"sales": sales.text})
except:
continue
for cos_term in cos_lst:
cos_elem = version + cos_term
try:
for cos in root.iter(cos_elem):
if cos.get("contextRef") == date_code:
is_dct.update({"costofsales": cos.text})
except:
continue
for oi_term in oi_lst:
oi_elem = version + oi_term
try:
for oi in root.iter(oi_elem):
if oi.get("contextRef") == date_code:
is_dct.update({"operatingincome": oi.text})
except:
continue
for intexp_term in intexp_lst:
intexp_elem = version + intexp_term
try:
for intexp in root.iter(intexp_elem):
if intexp.get("contextRef") == date_code:
is_dct.update({"interestexpense": intexp.text})
except:
continue
for tax_term in tax_lst:
tax_elem = version + tax_term
try:
for tax in root.iter(tax_elem):
if tax.get("contextRef") == date_code:
is_dct.update({"incometax": tax.text})
except:
continue
for ni_term in ni_lst:
ni_elem = version + ni_term
try:
for ni in root.iter(ni_elem):
if ni.get("contextRef") == date_code:
is_dct.update({"netincome": ni.text})
except:
continue
for eps_term in eps_lst:
eps_elem = version + eps_term
try:
for eps in root.iter(eps_elem):
if eps.get("contextRef") == date_code:
is_dct.update({"gaapdilutedeps": eps.text})
except:
continue
for sharesdiluted_term in sharesdiluted_lst:
sharesdiluted_elem = version + sharesdiluted_term
try:
for sharesdiluted in root.iter(sharesdiluted_elem):
if sharesdiluted.get("contextRef") == date_code:
is_dct.update({"dilutedsharesos": sharesdiluted.text})
except:
try:
is_dct.update({"dilutedsharesos": shares_os})
except:
continue
for div_term in div_lst:
div_elem = version + div_term
try:
for div in root.iter(div_elem):
if div.get("contextRef") == date_code:
is_dct.update({"dps": div.text})
except:
continue
for restructure_term in restructure_lst:
restructure_elem = version + restructure_term
try:
for restructure in root.iter(restructure_elem):
if restructure.get("contextRef") == date_code:
is_dct.update({"restructuringexpense": restructure.text})
except:
continue
for impairexp_term in impairexp_lst:
impairexp_elem = version + impairexp_term
try:
for impairexp in root.iter(impairexp_elem):
if impairexp.get("contextRef") == date_code:
is_dct.update({"impairmentexpense": impairexp.text})
except:
continue
for extdebt_term in extdebt_lst:
extdebt_elem = version + extdebt_term
try:
for extdebt in root.iter(extdebt_elem):
if extdebt.get("contextRef") == date_code:
is_dct.update({"extinguishmentdebt": extdebt.text})
except:
continue
for litigexp_term in litigexp_lst:
litigexp_elem = version + litigexp_term
try:
for litigexp in root.iter(litigexp_elem):
if litigexp.get("contextRef") == date_code:
is_dct.update({"litigationexpense": litigexp.text})
except:
continue
for opexp_term in opexp_lst:
opexp_elem = version + opexp_term
try:
for opexp in root.iter(opexp_elem):
if opexp.get("contextRef") == date_code:
is_dct.update({"operatingexpense": opexp.text})
except:
continue
for nonopexp_term in nonopexp_lst:
nonopexp_elem = version + nonopexp_term
try:
for nonopexp in root.iter(nonopexp_elem):
if nonopexp.get("contextRef") == date_code:
is_dct.update({"nonoperatingexpense": nonopexp.text})
except:
continue
for pretaxinc_term in pretaxinc_lst:
pretaxinc_elem = version + pretaxinc_term
try:
for pretaxinc in root.iter(pretaxinc_elem):
if pretaxinc.get("contextRef") == date_code:
is_dct.update({"pretaxincome": pretaxinc.text})
except:
continue
for sga_term in sga_lst:
sga_elem = version + sga_term
try:
for sga in root.iter(sga_elem):
if sga.get("contextRef") == date_code:
is_dct.update({"sellinggeneraladmin": sga.text})
except:
continue
for ga_term in ga_lst:
ga_elem = version + ga_term
try:
for ga in root.iter(ga_elem):
if ga.get("contextRef") == date_code:
is_dct.update({"generaladmin": ga.text})
except:
continue
for marketing_term in marketing_lst:
marketing_elem = version + marketing_term
try:
for marketing in root.iter(marketing_elem):
if marketing.get("contextRef") == date_code:
is_dct.update({"marketing": marketing.text})
except:
continue
for salary_term in salary_lst:
salary_elem = version + salary_term
try:
for salary in root.iter(salary_elem):
if salary.get("contextRef") == date_code:
is_dct.update({"salary": salary.text})
except:
continue
for rd_term in rd_lst:
rd_elem = version + rd_term
try:
for rd in root.iter(rd_elem):
if rd.get("contextRef") == date_code:
is_dct.update({"researchdevelopment": rd.text})
except:
continue
for totexp_term in totexp_lst:
totexp_elem = version + totexp_term
try:
for totexp in root.iter(totexp_elem):
if totexp.get("contextRef") == date_code:
is_dct.update({"totalexpense": totexp.text})
except:
continue
if len(is_dct) < 5:
continue
else:
new_dct = {"is_datecode": date_code, "report_period": report_period, "report_type": report_type, "start_date": start_date, "days_in_period": days, "quarter": quarter}
is_dct.update(new_dct)
break
break
master_dct.update({"Income Statement": is_dct})
# if len(datais_lst) < 2:
# continue
# else:
# break
# master_dct.update({"Income Statement": datais_lst})
bs_dct = {}
    # Find the specific id attribute that contains the date code for the current quarter
# BALANCE SHEET Scraper
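    # Balance sheet items are reported as of an instant rather than over a duration, so the
    # context here is matched on its <instant> date equalling the report period instead of on
    # start/end dates.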
for context in root.iter("{http://www.xbrl.org/2003/instance}context"):
if "Q" in context.get("id") and "I" in context.get("id") and "_" not in context.get("id"):
# print ("First if worked")
period = context.find("{http://www.xbrl.org/2003/instance}period")
try:
if period.find("{http://www.xbrl.org/2003/instance}instant").text == report_period:
# print ("Second if worked")
date_code = context.get("id")
print ("BS 1st method:", date_code)
except:
continue
elif "I" in context.get("id") and report_period[0:4] in context.get("id") and "_" not in context.get("id"):
# print ("First if worked")
period = context.find("{http://www.xbrl.org/2003/instance}period")
try:
if period.find("{http://www.xbrl.org/2003/instance}instant").text == report_period:
# print ("Second if worked")
date_code = context.get("id")
print ("BS 2nd method:", date_code)
except:
continue
# elif "YTD" in context.get("id") and "us-gaap" not in context.get("id"):
## print ("First if worked")
# period = context.find("{http://www.xbrl.org/2003/instance}period")
# try:
# if period.find("{http://www.xbrl.org/2003/instance}endDate").text == report_period:
## print ("Second if worked")
# date_code = context.get("id")
# print ("2nd method", date_code)
# except:
# continue
# Replace the hardcoded cik number with dynamic cik term
elif cik in context.get("id") and "".join(report_period.split("-")) in context.get("id") and "us-gaap" not in context.get("id"):
# print ("First if worked")
period = context.find("{http://www.xbrl.org/2003/instance}period")
try:
if period.find("{http://www.xbrl.org/2003/instance}instant").text == report_period:
# print ("Second if worked")
date_code = context.get("id")
print ("BS 3rd method:", date_code)
except:
continue
elif "As_Of" in context.get("id") and "us-gaap" not in context.get("id") and "dei" not in context.get("id") and "srt" not in context.get("id"):
period = context.find("{http://www.xbrl.org/2003/instance}period")
if report_type == "10-Q":
try:
if period.find("{http://www.xbrl.org/2003/instance}instant").text == report_period:
date_code = context.get("id")
print ("BS 4th method:", date_code)
except:
continue
elif report_type == "10-K":
try:
if period.find("{http://www.xbrl.org/2003/instance}instant").text == report_period:
date_code = context.get("id")
print ("BS 4th method:", date_code)
except:
continue
elif "AS_OF" in context.get("id"):
period = context.find("{http://www.xbrl.org/2003/instance}period")
if report_type == "10-Q":
try:
if period.find("{http://www.xbrl.org/2003/instance}instant").text == report_period:
date_code = context.get("id")
print ("BS 5th method:", date_code)
except:
continue
elif report_type == "10-K":
try:
if period.find("{http://www.xbrl.org/2003/instance}instant").text == report_period:
date_code = context.get("id")
print ("BS 5th method:", date_code)
except:
continue
elif "eol" in context.get("id") and "".join(report_period.split("-")) in context.get("id") and context.get("id").endswith("0") and "x" not in context.get("id"):
# print ("First if worked")
period = context.find("{http://www.xbrl.org/2003/instance}period")
try:
if period.find("{http://www.xbrl.org/2003/instance}instant").text == report_period:
# print ("Second if worked")
date_code = context.get("id")
print ("BS 6th method:", date_code)
except:
continue
# else:
# period = context.find("{http://www.xbrl.org/2003/instance}period")
# try:
# if period.find("{http://www.xbrl.org/2003/instance}instant").text == report_period:
# date_code = context.get("id")
# print ("BS last method:", date_code)
# except:
# continue
    # Different variations of the balance sheet items are tested to ensure items are not missed during scraping
v_lst = ["{http://xbrl.us/us-gaap/2009-01-31}", "{http://xbrl.us/us-gaap/2010-01-31}", "{http://fasb.org/us-gaap/2011-01-31}", "{http://fasb.org/us-gaap/2012-01-31}", "{http://fasb.org/us-gaap/2013-01-31}", "{http://fasb.org/us-gaap/2014-01-31}", "{http://fasb.org/us-gaap/2015-01-31}", "{http://fasb.org/us-gaap/2016-01-31}", "{http://fasb.org/us-gaap/2017-01-31}", "{http://fasb.org/us-gaap/2018-01-31}", "{http://fasb.org/us-gaap/2019-01-31}"]
cash_lst = ["CashAndCashEquivalentsAtCarryingValue", "CashCashEquivalentsRestrictedCashAndRestrictedCashEquivalents"]
ar_lst = ["AccountsReceivableNetCurrent", "ReceivablesNetCurrent"]
inv_lst = ["InventoryNet"]
ca_lst = ["AssetsCurrent"]
ppe_lst = ["PropertyPlantAndEquipmentNet"]
goodw_lst = ["Goodwill"]
assets_lst = ["Assets"]
stdebt_lst = ["DebtCurrent", "LongTermDebtCurrent", "ShortTermBankLoansAndNotesPayable", "ShortTermBorrowings"]
ap_lst = ["AccountsPayableCurrent", "AccountsPayableTradeCurrent"]
cl_lst = ["LiabilitiesCurrent"]
ltdebt_lst = ["OtherLongTermDebtNoncurrent", "DebtAndCapitalLeaseObligations", "LongTermDebt", "LongTermDebtNoncurrent", "LongTermDebtAndCapitalLeaseObligations"]
paidincap_lst = ["AdditionalPaidInCapital", "AdditionalPaidInCapitalCommonStock", "CommonStocksIncludingAdditionalPaidInCapital"]
retearnings_lst = ["RetainedEarningsAccumulatedDeficit"]
nc_lst = ["MinorityInterest"]
equity_lst = ["StockholdersEquity", "StockholdersEquityIncludingPortionAttributableToNoncontrollingInterest"]
liabequity_lst = ["LiabilitiesAndStockholdersEquity"]
    # Extracting balance sheet items from the xml file and adding them to the balance sheet dictionary
for version in v_lst:
for cash_term in cash_lst:
cash_elem = version + cash_term
try:
for cash in root.iter(cash_elem):
if cash.get("contextRef") == date_code:
bs_dct.update({"cashandcashequivalents": cash.text})
except:
continue
for ar_term in ar_lst:
ar_elem = version + ar_term
try:
for ar in root.iter(ar_elem):
if ar.get("contextRef") == date_code:
bs_dct.update({"accountsreceivable": ar.text})
except:
continue
for inv_term in inv_lst:
inv_elem = version + inv_term
try:
for inv in root.iter(inv_elem):
if inv.get("contextRef") == date_code:
bs_dct.update({"inventory": inv.text})
except:
continue
for ca_term in ca_lst:
ca_elem = version + ca_term
try:
for ca in root.iter(ca_elem):
if ca.get("contextRef") == date_code:
bs_dct.update({"currentassets": ca.text})
except:
continue
for ppe_term in ppe_lst:
ppe_elem = version + ppe_term
try:
for ppe in root.iter(ppe_elem):
if ppe.get("contextRef") == date_code:
bs_dct.update({"propertyplantequipment": ppe.text})
except:
continue
for goodw_term in goodw_lst:
goodw_elem = version + goodw_term
try:
for goodw in root.iter(goodw_elem):
if goodw.get("contextRef") == date_code:
bs_dct.update({"goodwill": goodw.text})
except:
continue
for assets_term in assets_lst:
assets_elem = version + assets_term
try:
for assets in root.iter(assets_elem):
if assets.get("contextRef") == date_code:
bs_dct.update({"assets": assets.text})
except:
continue
for stdebt_term in stdebt_lst:
stdebt_elem = version + stdebt_term
try:
for stdebt in root.iter(stdebt_elem):
if stdebt.get("contextRef") == date_code:
bs_dct.update({"shorttermdebt": stdebt.text})
except:
continue
for ap_term in ap_lst:
ap_elem = version + ap_term
try:
for ap in root.iter(ap_elem):
if ap.get("contextRef") == date_code:
bs_dct.update({"accountspayable": ap.text})
except:
continue
for cl_term in cl_lst:
cl_elem = version + cl_term
try:
for cl in root.iter(cl_elem):
if cl.get("contextRef") == date_code:
bs_dct.update({"currentliabilities": cl.text})
# print ("Total Current Liabilities", cl.text)
except:
continue
for ltdebt_term in ltdebt_lst:
ltdebt_elem = version + ltdebt_term
try:
for ltdebt in root.iter(ltdebt_elem):
if ltdebt.get("contextRef") == date_code:
bs_dct.update({"longtermdebt": ltdebt.text})
# print ("Long-term Debt", ltdebt.text)
except:
continue
for paidincap_term in paidincap_lst:
paidincap_elem = version + paidincap_term
try:
for paidincap in root.iter(paidincap_elem):
if paidincap.get("contextRef") == date_code:
bs_dct.update({"additionalpic": paidincap.text})
except:
continue
for retearnings_term in retearnings_lst:
retearnings_elem = version + retearnings_term
try:
for retearnings in root.iter(retearnings_elem):
if retearnings.get("contextRef") == date_code:
bs_dct.update({"retainedearnings": retearnings.text})
except:
continue
for nc_term in nc_lst:
nc_elem = version + nc_term
try:
for nc in root.iter(nc_elem):
if nc.get("contextRef") == date_code:
bs_dct.update({"ncinterest": nc.text})
except:
continue
for equity_term in equity_lst:
equity_elem = version + equity_term
try:
for equity in root.iter(equity_elem):
if equity.get("contextRef") == date_code:
bs_dct.update({"equity": equity.text})
except:
continue
for liabequity_term in liabequity_lst:
liabequity_elem = version + liabequity_term
try:
for liabequity in root.iter(liabequity_elem):
if liabequity.get("contextRef") == date_code:
bs_dct.update({"liabilitiesequity": liabequity.text})
except:
continue
if len(bs_dct) < 5:
continue
else:
new_dct = {"bs_datecode": date_code, "report_period": report_period, "report_type": report_type, "quarter": quarter}
bs_dct.update(new_dct)
break
break
# if len(databs_lst) < 2:
# continue
# else:
# break
master_dct.update({"Balance Sheet": bs_dct})
# master_dct.update({"Balance Sheet": databs_lst})
cfs_dct = {}
# datacfs_lst = []
    # Find the specific id attribute that contains the date code for the current quarter
# CASH FLOW STATEMENT Scraper
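    # Cash flow contexts are durations (often year-to-date), so the match here only requires the
    # context endDate to equal the report period; the number of days in the period is computed
    # later, once a sufficiently populated dictionary has been found.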
for context in root.iter("{http://www.xbrl.org/2003/instance}context"):
if "Q" in context.get("id") and "D" in context.get("id") and "us-gaap" not in context.get("id") and "dei" not in context.get("id"):
# print ("First if worked")
period = context.find("{http://www.xbrl.org/2003/instance}period")
try:
if period.find("{http://www.xbrl.org/2003/instance}endDate").text == report_period:
# print ("Second if worked")
date_code = context.get("id")
print ("CFS 1st method", date_code)
except:
continue
elif "D" in context.get("id") and report_period[0:4] in context.get("id") and "us-gaap" not in context.get("id") and "dei" not in context.get("id"):
# print ("First if worked")
period = context.find("{http://www.xbrl.org/2003/instance}period")
try:
if period.find("{http://www.xbrl.org/2003/instance}endDate").text == report_period:
# print ("Second if worked")
date_code = context.get("id")
print ("CFS 2nd method", date_code)
except:
continue
elif "YTD" in context.get("id") and "_" not in context.get("id"):
# print ("First if worked")
period = context.find("{http://www.xbrl.org/2003/instance}period")
try:
if period.find("{http://www.xbrl.org/2003/instance}endDate").text == report_period:
# print ("Second if worked")
date_code = context.get("id")
print ("CFS 3rd method:", date_code)
except:
continue
# Replace the hardcoded cik number with dynamic cik term
elif cik in context.get("id") and "".join(report_period.split("-")) in context.get("id") and "us-gaap" not in context.get("id"):
# print ("First if worked")
period = context.find("{http://www.xbrl.org/2003/instance}period")
try:
if period.find("{http://www.xbrl.org/2003/instance}endDate").text == report_period:
# print ("Second if worked")
date_code = context.get("id")
print ("CFS 4th method:", date_code)
except:
continue
elif "Duration" in context.get("id") and "us-gaap" not in context.get("id") and "dei" not in context.get("id") and "srt" not in context.get("id"):
period = context.find("{http://www.xbrl.org/2003/instance}period")
if report_type == "10-Q":
if period.find("{http://www.xbrl.org/2003/instance}endDate").text == report_period:
date_code = context.get("id")
print ("CFS 5th method:", date_code)
elif report_type == "10-K":
if period.find("{http://www.xbrl.org/2003/instance}endDate").text == report_period:
date_code = context.get("id")
print ("CFS 5th method:", date_code)
elif "FROM" in context.get("id") and "TO" in context.get("id"):
period = context.find("{http://www.xbrl.org/2003/instance}period")
if report_type == "10-Q":
if period.find("{http://www.xbrl.org/2003/instance}endDate").text == report_period:
date_code = context.get("id")
print ("CFS 6th method:", date_code)
elif report_type == "10-K":
if period.find("{http://www.xbrl.org/2003/instance}endDate").text == report_period:
date_code = context.get("id")
print ("CFS 6th method:", date_code)
elif "eol" in context.get("id") and "".join(report_period.split("-")) in context.get("id") and context.get("id").endswith("0") and "x" not in context.get("id"):
# print ("First if worked")
period = context.find("{http://www.xbrl.org/2003/instance}period")
try:
if period.find("{http://www.xbrl.org/2003/instance}endDate").text == report_period:
# print ("Second if worked")
date_code = context.get("id")
print ("CFS 7th method:", date_code)
except:
continue
# else:
# period = context.find("{http://www.xbrl.org/2003/instance}period")
# try:
# if period.find("{http://www.xbrl.org/2003/instance}endDate").text == report_period:
# date_code = context.get("id")
# print ("CFS last method:", date_code)
# except:
# continue
    # Different variations of the cash flow statement items are tested to ensure items are not missed during scraping
v_lst = ["{http://xbrl.us/us-gaap/2009-01-31}", "{http://xbrl.us/us-gaap/2010-01-31}", "{http://fasb.org/us-gaap/2011-01-31}", "{http://fasb.org/us-gaap/2012-01-31}", "{http://fasb.org/us-gaap/2013-01-31}", "{http://fasb.org/us-gaap/2014-01-31}", "{http://fasb.org/us-gaap/2015-01-31}", "{http://fasb.org/us-gaap/2016-01-31}", "{http://fasb.org/us-gaap/2017-01-31}", "{http://fasb.org/us-gaap/2018-01-31}", "{http://fasb.org/us-gaap/2019-01-31}"]
da_lst = ["AmortizationOfIntangibleAssets", "AdjustmentForAmortization", "DepreciationNonproduction", "Depreciation", "DepreciationAmortizationAndAccretionNet", "DepreciationAndAmortization", "DepreciationDepletionAndAmortization"]
sbcomp_lst = ["ShareBasedCompensation"]
cashop_lst = ["NetCashProvidedByUsedInOperatingActivities", "NetCashProvidedByUsedInOperatingActivitiesContinuingOperations"]
capex_lst = ["PaymentsForCapitalImprovements", "PaymentsToAcquireProductiveAssets", "PaymentsForProceedsFromProductiveAssets", "PaymentsToAcquirePropertyPlantAndEquipment", "PaymentsForConstructionInProcess"]
acq_lst = ["PaymentsToAcquireBusinessesNetOfCashAcquired", "PaymentsToAcquireBusinessesAndInterestInAffiliates"]
cashinv_lst = ["NetCashProvidedByUsedInInvestingActivities", "NetCashProvidedByUsedInInvestingActivitiesContinuingOperations"]
debtissued_lst = ["ProceedsFromIssuanceOfLongTermDebt"]
debtrepaid_lst = ["RepaymentsOfDebtAndCapitalLeaseObligations", "RepaymentsOfSecuredDebt", "RepaymentsOfLongTermDebt", "RepaymentsOfDebt"]
equityissued_lst = ["ProceedsFromIssuanceOfCommonStock"]
equitybought_lst = ["PaymentsForRepurchaseOfCommonStock"]
cashfin_lst = ["NetCashProvidedByUsedInFinancingActivities", "NetCashProvidedByUsedInFinancingActivitiesContinuingOperations"]
    # Extracting cash flow statement items from the xml file and adding them to the cash flow statement dictionary
for version in v_lst:
for da_term in da_lst:
da_elem = version + da_term
try:
for da in root.iter(da_elem):
if da.get("contextRef") == date_code:
cfs_dct.update({"da": da.text})
except:
continue
for sbcomp_term in sbcomp_lst:
sbcomp_elem = version + sbcomp_term
try:
for sbcomp in root.iter(sbcomp_elem):
if sbcomp.get("contextRef") == date_code:
cfs_dct.update({"sbcomp": sbcomp.text})
except:
continue
for cashop_term in cashop_lst:
cashop_elem = version + cashop_term
try:
for cashop in root.iter(cashop_elem):
if cashop.get("contextRef") == date_code:
cfs_dct.update({"cashfromoperations": cashop.text})
except:
continue
for capex_term in capex_lst:
capex_elem = version + capex_term
try:
for capex in root.iter(capex_elem):
if capex.get("contextRef") == date_code:
cfs_dct.update({"capex": capex.text})
except:
continue
for acq_term in acq_lst:
acq_elem = version + acq_term
try:
for acq in root.iter(acq_elem):
if acq.get("contextRef") == date_code:
cfs_dct.update({"acquisitionspend": acq.text})
except:
continue
for cashinv_term in cashinv_lst:
cashinv_elem = version + cashinv_term
try:
for cashinv in root.iter(cashinv_elem):
if cashinv.get("contextRef") == date_code:
cfs_dct.update({"cashfrominvesting": cashinv.text})
except:
continue
for debtissued_term in debtissued_lst:
debtissued_elem = version + debtissued_term
try:
for debtissued in root.iter(debtissued_elem):
if debtissued.get("contextRef") == date_code:
cfs_dct.update({"debtissuance": debtissued.text})
except:
continue
for debtrepaid_term in debtrepaid_lst:
debtrepaid_elem = version + debtrepaid_term
try:
for debtrepaid in root.iter(debtrepaid_elem):
if debtrepaid.get("contextRef") == date_code:
cfs_dct.update({"debtrepayment": debtrepaid.text})
except:
continue
for equityissued_term in equityissued_lst:
equityissued_elem = version + equityissued_term
try:
for equityissued in root.iter(equityissued_elem):
if equityissued.get("contextRef") == date_code:
cfs_dct.update({"equityissuance": equityissued.text})
except:
continue
for equitybought_term in equitybought_lst:
equitybought_elem = version + equitybought_term
try:
for equitybought in root.iter(equitybought_elem):
if equitybought.get("contextRef") == date_code:
cfs_dct.update({"sharebuyback": equitybought.text})
except:
continue
for cashfin_term in cashfin_lst:
cashfin_elem = version + cashfin_term
try:
for cashfin in root.iter(cashfin_elem):
if cashfin.get("contextRef") == date_code:
cfs_dct.update({"cashfromfinancing": cashfin.text})
except:
continue
if len(cfs_dct) < 4:
continue
else:
# period = context.find("{http://www.xbrl.org/2003/instance}period")
start_date = period.find("{http://www.xbrl.org/2003/instance}startDate").text
start_date_dt = datetime.datetime.strptime(start_date, "%Y-%m-%d")
end_date = period.find("{http://www.xbrl.org/2003/instance}endDate").text
end_date_dt = datetime.datetime.strptime(end_date, "%Y-%m-%d")
delta = end_date_dt - start_date_dt
days = delta.days
print ("CFS No. of days:", days)
# if days < 110:
# new_quarter = "Q1" + report_period[0:4]
# elif 165 < days < 195:
# new_quarter = "Q2" + report_period[0:4]
# elif 255 < days < 285:
# new_quarter = "Q3" + report_period[0:4]
# elif 355 < days < 370:
# new_quarter = "FY" + report_period[0:4]
# print ("CFS quarter", new_quarter)
#
# if quarter == new_quarter:
# quarter = quarter
# print ("Quarter code is same")
# else:
# quarter = new_quarter
# is_dct.update({"quarter": quarter})
# bs_dct.update({"quarter": quarter})
# print ("Quarter code is different")
new_dct = {"cfs_datecode": date_code, "report_period": report_period, "report_type": report_type, "start_date": start_date, "days_in_period": days, "quarter": quarter}
cfs_dct.update(new_dct)
break
break
master_dct.update({"Cash Flow Statement": cfs_dct})
# master_dct.update({"Cash Flow Statement": datacfs_lst})
return master_dct, is_dct, bs_dct, cfs_dct
#fs = xbrl_scraper(urlname2 = "https://www.sec.gov/Archives/edgar/data/899051/000089905118000044/0000899051-18-000044-index.htm", report_type = "10-Q")
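# The lookup pattern used throughout xbrl_scraper is: build a fully qualified tag name from a
# namespace URI plus a concept name, iterate over the matching elements and keep the value whose
# contextRef equals the date code of the current period. A minimal sketch of that lookup factored
# into one helper is shown below; the helper name and its arguments are illustrative assumptions
# and are not used by the original script.
def _find_tagged_value(root, namespace_versions, concept_names, date_code):
    # Try every taxonomy version and every concept variation; return the first element whose
    # contextRef matches the requested date code, or None if nothing matches.
    for version in namespace_versions:
        for concept in concept_names:
            for elem in root.iter(version + concept):
                if elem.get("contextRef") == date_code:
                    return elem.text
    return None
# Example (hypothetical): _find_tagged_value(root, v_lst, ["Revenues", "SalesRevenueNet"], date_code)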
def df_merge(df_is, df_bs, df_cfs):
    # The function df_merge makes sure the data in the df_is, df_bs and df_cfs dataframes is ordered,
    # consistently formatted and compatible with the database. It handles missing data, converts all
    # values to one numeric format and adds columns of zeroes when a column is not available for a
    # company, so that every company ends up with exactly the same set of columns.
######### INCOME STATEMENT #################
dfis = df_is
dfis = dfis.dropna(thresh = 6)
try:
dfis["report_period"] = dfis["report_period"].apply(lambda x: datetime.datetime.strptime(x, "%Y-%m-%d"))
dfis = dfis.sort_values(by = "report_period")
except:
print ("Data not found, filling columns to zeroes")
pass
col_lst = list(dfis.columns)
    # Forward-fill shares outstanding before the remaining gaps are zero-filled; the column name
    # matches the "dilutedsharesos" key produced by xbrl_scraper.
    if "dilutedsharesos" in col_lst:
        dfis["dilutedsharesos"] = dfis["dilutedsharesos"].fillna(method = "ffill")
dfis = dfis.fillna(0)
# dfis = dfis.set_index("quarter")
if "sales" in col_lst:
dfis["sales"] = dfis["sales"].apply(lambda x: float(x))
else:
dfis["sales"] = [0 for i in range(len(dfis))]
if "costofsales" in col_lst:
dfis["costofsales"] = dfis["costofsales"].apply(lambda x: float(x))
else:
dfis["costofsales"] = [0 for i in range(len(dfis))]
if "sellinggeneraladmin" in col_lst:
dfis["sellinggeneraladmin"] = dfis["sellinggeneraladmin"].apply(lambda x: float(x))
else:
dfis["sellinggeneraladmin"] = [0 for i in range(len(dfis))]
if "generaladmin" in col_lst:
dfis["generaladmin"] = dfis["generaladmin"].apply(lambda x: float(x))
else:
dfis["generaladmin"] = [0 for i in range(len(dfis))]
if "marketing" in col_lst:
dfis["marketing"] = dfis["marketing"].apply(lambda x: float(x))
else:
dfis["marketing"] = [0 for i in range(len(dfis))]
if "salary" in col_lst:
dfis["salary"] = dfis["salary"].apply(lambda x: float(x))
else:
dfis["salary"] = [0 for i in range(len(dfis))]
if "researchdevelopment" in col_lst:
dfis["researchdevelopment"] = dfis["researchdevelopment"].apply(lambda x: float(x))
else:
dfis["researchdevelopment"] = [0 for i in range(len(dfis))]
if "operatingexpense" in col_lst:
dfis["operatingexpense"] = dfis["operatingexpense"].apply(lambda x: float(x))
else:
dfis["operatingexpense"] = [0 for i in range(len(dfis))]
if "restructuringexpense" in col_lst:
dfis["restructuringexpense"] = dfis["restructuringexpense"].apply(lambda x: float(x))
else:
dfis["restructuringexpense"] = [0 for i in range(len(dfis))]
if "impairmentexpense" in col_lst:
dfis["impairmentexpense"] = dfis["impairmentexpense"].apply(lambda x: float(x))
else:
dfis["impairmentexpense"] = [0 for i in range(len(dfis))]
if "litigationexpense" in col_lst:
dfis["litigationexpense"] = dfis["litigationexpense"].apply(lambda x: float(x))
else:
dfis["litigationexpense"] = [0 for i in range(len(dfis))]
if "operatingincome" in col_lst:
dfis["operatingincome"] = dfis["operatingincome"].apply(lambda x: float(x))
else:
dfis["operatingincome"] = [0 for i in range(len(dfis))]
if "extinguishmentdebt" in col_lst:
dfis["extinguishmentdebt"] = dfis["extinguishmentdebt"].apply(lambda x: float(x))
else:
dfis["extinguishmentdebt"] = [0 for i in range(len(dfis))]
if "nonoperatingexpense" in col_lst:
dfis["nonoperatingexpense"] = dfis["nonoperatingexpense"].apply(lambda x: float(x))
else:
dfis["nonoperatingexpense"] = [0 for i in range(len(dfis))]
if "interestexpense" in col_lst:
dfis["interestexpense"] = dfis["interestexpense"].apply(lambda x: float(x))
else:
dfis["interestexpense"] = [0 for i in range(len(dfis))]
if "pretaxincome" in col_lst:
dfis["pretaxincome"] = dfis["pretaxincome"].apply(lambda x: float(x))
else:
dfis["pretaxincome"] = [0 for i in range(len(dfis))]
if "incometax" in col_lst:
dfis["incometax"] = dfis["incometax"].apply(lambda x: float(x))
else:
dfis["incometax"] = [0 for i in range(len(dfis))]
if "totalexpense" in col_lst:
dfis["totalexpense"] = dfis["totalexpense"].apply(lambda x: float(x))
else:
dfis["totalexpense"] = [0 for i in range(len(dfis))]
if "netincome" in col_lst:
dfis["netincome"] = dfis["netincome"].apply(lambda x: float(x))
else:
dfis["netincome"] = [0 for i in range(len(dfis))]
if "gaapdilutedeps" in col_lst:
dfis["gaapdilutedeps"] = dfis["gaapdilutedeps"].apply(lambda x: float(x))
else:
dfis["gaapdilutedeps"] = [0 for i in range(len(dfis))]
if "dilutedsharesos" in col_lst:
dfis["dilutedsharesos"] = dfis["dilutedsharesos"].apply(lambda x: float(x))
else:
dfis["dilutedsharesos"] = [0 for i in range(len(dfis))]
if "dps" in col_lst:
dfis["dps"] = dfis["dps"].apply(lambda x: float(x))
else:
dfis["dps"] = [0 for i in range(len(dfis))]
if "days_in_period" in col_lst:
dfis["days_in_period"] = dfis["days_in_period"].apply(lambda x: float(x))
else:
dfis["days_in_period"] = [0 for i in range(len(dfis))]
######### CASH FLOW STATEMENT #################
# dfcfs = dfs[2]
dfcfs = df_cfs
dfcfs = dfcfs.dropna(thresh = 5)
try:
dfcfs["report_period"] = dfcfs["report_period"].apply(lambda x: datetime.datetime.strptime(x, "%Y-%m-%d"))
dfcfs = dfcfs.sort_values(by = "report_period")
except:
print ("Data not found, filling columns to zeroes")
pass
dfcfs = dfcfs.fillna(0)
# dfcfs = dfcfs.set_index("quarter")
col_lst = list(dfcfs.columns)
if "da" in col_lst:
dfcfs["da"] = dfcfs["da"].apply(lambda x: float(x))
else:
dfcfs["da"] = [0 for i in range(len(dfcfs))]
if "sbcomp" in col_lst:
dfcfs["sbcomp"] = dfcfs["sbcomp"].apply(lambda x: float(x))
else:
dfcfs["sbcomp"] = [0 for i in range(len(dfcfs))]
if "cashfromoperations" in col_lst:
dfcfs["cashfromoperations"] = dfcfs["cashfromoperations"].apply(lambda x: float(x))
else:
dfcfs["cashfromoperations"] = [0 for i in range(len(dfcfs))]
if "capex" in col_lst:
dfcfs["capex"] = dfcfs["capex"].apply(lambda x: float(x))
else:
dfcfs["capex"] = [0 for i in range(len(dfcfs))]
if "acquisitionspend" in col_lst:
dfcfs["acquisitionspend"] = dfcfs["acquisitionspend"].apply(lambda x: float(x))
else:
dfcfs["acquisitionspend"] = [0 for i in range(len(dfcfs))]
if "cashfrominvesting" in col_lst:
dfcfs["cashfrominvesting"] = dfcfs["cashfrominvesting"].apply(lambda x: float(x))
else:
dfcfs["cashfrominvesting"] = [0 for i in range(len(dfcfs))]
if "debtissuance" in col_lst:
dfcfs["debtissuance"] = dfcfs["debtissuance"].apply(lambda x: float(x))
else:
dfcfs["debtissuance"] = [0 for i in range(len(dfcfs))]
if "debtrepayment" in col_lst:
dfcfs["debtrepayment"] = dfcfs["debtrepayment"].apply(lambda x: float(x))
else:
dfcfs["debtrepayment"] = [0 for i in range(len(dfcfs))]
if "equityissuance" in col_lst:
dfcfs["equityissuance"] = dfcfs["equityissuance"].apply(lambda x: float(x))
else:
dfcfs["equityissuance"] = [0 for i in range(len(dfcfs))]
if "sharebuyback" in col_lst:
dfcfs["sharebuyback"] = dfcfs["sharebuyback"].apply(lambda x: float(x))
else:
dfcfs["sharebuyback"] = [0 for i in range(len(dfcfs))]
if "cashfromfinancing" in col_lst:
dfcfs["cashfromfinancing"] = dfcfs["cashfromfinancing"].apply(lambda x: float(x))
else:
dfcfs["cashfromfinancing"] = [0 for i in range(len(dfcfs))]
######### BALANCE SHEET #################
dfbs = df_bs
dfbs = dfbs.dropna(thresh = 5)
try:
dfbs["report_period"] = dfbs["report_period"].apply(lambda x: datetime.datetime.strptime(x, "%Y-%m-%d"))
dfbs = dfbs.sort_values(by = "report_period")
except:
print ("Data not found, filling columns to zeroes")
pass
dfbs = dfbs.fillna(0)
col_lst = list(dfbs.columns)
if "cashandcashequivalents" in col_lst:
dfbs["cashandcashequivalents"] = dfbs["cashandcashequivalents"].apply(lambda x: float(x))
else:
dfbs["cashandcashequivalents"] = [0 for i in range(len(dfbs))]
if "accountsreceivable" in col_lst:
dfbs["accountsreceivable"] = dfbs["accountsreceivable"].apply(lambda x: float(x))
else:
dfbs["accountsreceivable"] = [0 for i in range(len(dfbs))]
if "inventory" in col_lst:
dfbs["inventory"] = dfbs["inventory"].apply(lambda x: float(x))
else:
dfbs["inventory"] = [0 for i in range(len(dfbs))]
if "currentassets" in col_lst:
dfbs["currentassets"] = dfbs["currentassets"].apply(lambda x: float(x))
else:
dfbs["currentassets"] = [0 for i in range(len(dfbs))]
if "propertyplantequipment" in col_lst:
dfbs["propertyplantequipment"] = dfbs["propertyplantequipment"].apply(lambda x: float(x))
else:
dfbs["propertyplantequipment"] = [0 for i in range(len(dfbs))]
if "goodwill" in col_lst:
dfbs["goodwill"] = dfbs["goodwill"].apply(lambda x: float(x))
else:
dfbs["goodwill"] = [0 for i in range(len(dfbs))]
if "assets" in col_lst:
dfbs["assets"] = dfbs["assets"].apply(lambda x: float(x))
else:
dfbs["assets"] = [0 for i in range(len(dfbs))]
if "shorttermdebt" in col_lst:
dfbs["shorttermdebt"] = dfbs["shorttermdebt"].apply(lambda x: float(x))
else:
dfbs["shorttermdebt"] = [0 for i in range(len(dfbs))]
if "accountspayable" in col_lst:
dfbs["accountspayable"] = dfbs["accountspayable"].apply(lambda x: float(x))
else:
dfbs["accountspayable"] = [0 for i in range(len(dfbs))]
if "currentliabilities" in col_lst:
dfbs["currentliabilities"] = dfbs["currentliabilities"].apply(lambda x: float(x))
else:
dfbs["currentliabilities"] = [0 for i in range(len(dfbs))]
if "longtermdebt" in col_lst:
dfbs["longtermdebt"] = dfbs["longtermdebt"].apply(lambda x: float(x))
else:
dfbs["longtermdebt"] = [0 for i in range(len(dfbs))]
if "additionalpic" in col_lst:
dfbs["additionalpic"] = dfbs["additionalpic"].apply(lambda x: float(x))
else:
dfbs["additionalpic"] = [0 for i in range(len(dfbs))]
if "retainedearnings" in col_lst:
dfbs["retainedearnings"] = dfbs["retainedearnings"].apply(lambda x: float(x))
else:
dfbs["retainedearnings"] = [0 for i in range(len(dfbs))]
if "ncinterest" in col_lst:
dfbs["ncinterest"] = dfbs["ncinterest"].apply(lambda x: float(x))
else:
dfbs["ncinterest"] = [0 for i in range(len(dfbs))]
if "equity" in col_lst:
dfbs["equity"] = dfbs["equity"].apply(lambda x: float(x))
else:
dfbs["equity"] = [0 for i in range(len(dfbs))]
if "liabilitiesequity" in col_lst:
dfbs["liabilitiesequity"] = dfbs["liabilitiesequity"].apply(lambda x: float(x))
else:
dfbs["liabilitiesequity"] = [0 for i in range(len(dfbs))]
return dfis, dfbs, dfcfs
# df_ttm
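# A minimal usage sketch (kept commented out, following the convention above) tying the scraper and
# df_merge together: the three branch dictionaries returned by xbrl_scraper for each filing can be
# collected into lists, turned into DataFrames and normalised with df_merge. The variable names
# below are illustrative assumptions, not part of the original script.
# is_rows, bs_rows, cfs_rows = [], [], []
# master, is_dct, bs_dct, cfs_dct = xbrl_scraper(filing_index_url, "10-Q")
# is_rows.append(is_dct); bs_rows.append(bs_dct); cfs_rows.append(cfs_dct)
# df_is, df_bs, df_cfs = df_merge(pd.DataFrame(is_rows), pd.DataFrame(bs_rows), pd.DataFrame(cfs_rows))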
def database(df_is, df_bs, df_cfs):
    # The function pushes the completed income statement, balance sheet and cash flow statement
    # dataframes into the database
df_is.to_sql(name = "income_statement", con = engine, if_exists = "append")
df_bs.to_sql(name = "balance_sheet", con = engine, if_exists = "append")
df_cfs.to_sql(name = "cash_flow_statement", con = engine, if_exists = "append")
print ("Entered into the Database")
time.sleep(10)
print ("Restarting.....\n\n\n\n")
# df_ttm.to_sql(name = "financial_data", con = engine, if_exists = "append")
return 0
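# database() above and edgar_crawler() below rely on a SQLAlchemy engine and a DB-API cursor that
# are created elsewhere in the script. A hedged sketch of that setup is shown below; the connection
# strings and credentials are placeholders, not values taken from the original.
# import time
# import psycopg2
# from sqlalchemy import create_engine
# engine = create_engine("postgresql://user:password@localhost:5432/edgar")
# conn = psycopg2.connect(dbname="edgar", user="user", password="password", host="localhost")
# cur = conn.cursor()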
def edgar_crawler(IDX, COMPANIES_TO_RUN):
    # The function edgar_crawler pulls all the functions together. It starts by calling sp500_cik,
    # which returns the ciks, tickers and ids (alternative sources are available for this, see my wiki scraper).
    # It then creates a url from the base url, which becomes the argument for the first function, first_scraper.
    # second_scraper is then called in a loop that opens every 10-K and 10-Q one at a time and
    # scrapes the data from each filing with xbrl_scraper. Once is_dct, bs_dct and cfs_dct
    # are returned by the scraper, they are appended to the respective collections. Then the
    # df_merge function is called to do the formatting. Once formatting is complete, df_is, df_bs and df_cfs
    # are pushed into the database with the database function.
    # Note that it is currently structured to append only data that is not yet in the database, i.e.
    # new quarters or annuals.
count = 0
metadata = sp500_cik()
ciks = metadata[0]
tickers = metadata[1]
ids = metadata[2]
url = "https://www.sec.gov/cgi-bin/browse-edgar"
# IDX = 0
# COMPANIES_TO_RUN = 1
for cik in ciks[IDX:IDX + COMPANIES_TO_RUN]:
# Get all the quarters currently in the database
date_lst = []
try:
cur.execute("select report_period from income_statement where ticker = (%s)", (tickers[IDX],))
data = cur.fetchall()
for date in data:
try:
date = date[0].strftime("%Y-%m-%d")
date_lst.append(date)
except:
continue
print (date_lst)
except:
pass
report_lst = ["10-Q", "10-K"]
# report_lst = ["10-Q"]
        df_is = pd.DataFrame()
# -*- coding: utf-8 -*-
"""
These test the public routines exposed in types/common.py
related to inference and not otherwise tested in types/test_common.py
"""
from warnings import catch_warnings, simplefilter
import collections
import re
from datetime import datetime, date, timedelta, time
from decimal import Decimal
from numbers import Number
from fractions import Fraction
import numpy as np
import pytz
import pytest
import pandas as pd
from pandas._libs import lib, iNaT, missing as libmissing
from pandas import (Series, Index, DataFrame, Timedelta,
DatetimeIndex, TimedeltaIndex, Timestamp,
Panel, Period, Categorical, isna, Interval,
DateOffset)
from pandas import compat
from pandas.compat import u, PY2, StringIO, lrange
from pandas.core.dtypes import inference
from pandas.core.dtypes.common import (
is_timedelta64_dtype,
is_timedelta64_ns_dtype,
is_datetime64_dtype,
is_datetime64_ns_dtype,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_number,
is_integer,
is_float,
is_bool,
is_scalar,
is_scipy_sparse,
ensure_int32,
ensure_categorical)
from pandas.util import testing as tm
import pandas.util._test_decorators as td
@pytest.fixture(params=[True, False], ids=str)
def coerce(request):
return request.param
# collect all objects to be tested for list-like-ness; use tuples of objects,
# whether they are list-like or not (special casing for sets), and their ID
ll_params = [
([1], True, 'list'), # noqa: E241
([], True, 'list-empty'), # noqa: E241
((1, ), True, 'tuple'), # noqa: E241
(tuple(), True, 'tuple-empty'), # noqa: E241
({'a': 1}, True, 'dict'), # noqa: E241
(dict(), True, 'dict-empty'), # noqa: E241
({'a', 1}, 'set', 'set'), # noqa: E241
(set(), 'set', 'set-empty'), # noqa: E241
(frozenset({'a', 1}), 'set', 'frozenset'), # noqa: E241
(frozenset(), 'set', 'frozenset-empty'), # noqa: E241
(iter([1, 2]), True, 'iterator'), # noqa: E241
(iter([]), True, 'iterator-empty'), # noqa: E241
((x for x in [1, 2]), True, 'generator'), # noqa: E241
((x for x in []), True, 'generator-empty'), # noqa: E241
(Series([1]), True, 'Series'), # noqa: E241
(Series([]), True, 'Series-empty'), # noqa: E241
(Series(['a']).str, True, 'StringMethods'), # noqa: E241
(Series([], dtype='O').str, True, 'StringMethods-empty'), # noqa: E241
(Index([1]), True, 'Index'), # noqa: E241
(Index([]), True, 'Index-empty'), # noqa: E241
(DataFrame([[1]]), True, 'DataFrame'), # noqa: E241
(DataFrame(), True, 'DataFrame-empty'), # noqa: E241
(np.ndarray((2,) * 1), True, 'ndarray-1d'), # noqa: E241
(np.array([]), True, 'ndarray-1d-empty'), # noqa: E241
(np.ndarray((2,) * 2), True, 'ndarray-2d'), # noqa: E241
(np.array([[]]), True, 'ndarray-2d-empty'), # noqa: E241
(np.ndarray((2,) * 3), True, 'ndarray-3d'), # noqa: E241
(np.array([[[]]]), True, 'ndarray-3d-empty'), # noqa: E241
(np.ndarray((2,) * 4), True, 'ndarray-4d'), # noqa: E241
(np.array([[[[]]]]), True, 'ndarray-4d-empty'), # noqa: E241
(np.array(2), False, 'ndarray-0d'), # noqa: E241
(1, False, 'int'), # noqa: E241
(b'123', False, 'bytes'), # noqa: E241
(b'', False, 'bytes-empty'), # noqa: E241
('123', False, 'string'), # noqa: E241
('', False, 'string-empty'), # noqa: E241
(str, False, 'string-type'), # noqa: E241
(object(), False, 'object'), # noqa: E241
(np.nan, False, 'NaN'), # noqa: E241
(None, False, 'None') # noqa: E241
]
objs, expected, ids = zip(*ll_params)
@pytest.fixture(params=zip(objs, expected), ids=ids)
def maybe_list_like(request):
return request.param
def test_is_list_like(maybe_list_like):
obj, expected = maybe_list_like
expected = True if expected == 'set' else expected
assert inference.is_list_like(obj) == expected
def test_is_list_like_disallow_sets(maybe_list_like):
obj, expected = maybe_list_like
expected = False if expected == 'set' else expected
assert inference.is_list_like(obj, allow_sets=False) == expected
def test_is_sequence():
is_seq = inference.is_sequence
assert (is_seq((1, 2)))
assert (is_seq([1, 2]))
assert (not is_seq("abcd"))
assert (not is_seq(u("abcd")))
assert (not is_seq(np.int64))
class A(object):
def __getitem__(self):
return 1
assert (not is_seq(A()))
def test_is_array_like():
assert inference.is_array_like(Series([]))
assert inference.is_array_like(Series([1, 2]))
assert inference.is_array_like(np.array(["a", "b"]))
assert inference.is_array_like(Index(["2016-01-01"]))
class DtypeList(list):
dtype = "special"
assert inference.is_array_like(DtypeList())
assert not inference.is_array_like([1, 2, 3])
assert not inference.is_array_like(tuple())
assert not inference.is_array_like("foo")
assert not inference.is_array_like(123)
@pytest.mark.parametrize('inner', [
[], [1], (1, ), (1, 2), {'a': 1}, {1, 'a'}, Series([1]),
Series([]), Series(['a']).str, (x for x in range(5))
])
@pytest.mark.parametrize('outer', [
list, Series, np.array, tuple
])
def test_is_nested_list_like_passes(inner, outer):
result = outer([inner for _ in range(5)])
assert inference.is_list_like(result)
@pytest.mark.parametrize('obj', [
'abc', [], [1], (1,), ['a'], 'a', {'a'},
[1, 2, 3], Series([1]), DataFrame({"A": [1]}),
([1, 2] for _ in range(5)),
])
def test_is_nested_list_like_fails(obj):
assert not inference.is_nested_list_like(obj)
@pytest.mark.parametrize(
"ll", [{}, {'A': 1}, Series([1])])
def test_is_dict_like_passes(ll):
assert inference.is_dict_like(ll)
@pytest.mark.parametrize(
"ll", ['1', 1, [1, 2], (1, 2), range(2), Index([1])])
def test_is_dict_like_fails(ll):
assert not inference.is_dict_like(ll)
@pytest.mark.parametrize("has_keys", [True, False])
@pytest.mark.parametrize("has_getitem", [True, False])
@pytest.mark.parametrize("has_contains", [True, False])
def test_is_dict_like_duck_type(has_keys, has_getitem, has_contains):
class DictLike(object):
def __init__(self, d):
self.d = d
if has_keys:
def keys(self):
return self.d.keys()
if has_getitem:
def __getitem__(self, key):
return self.d.__getitem__(key)
if has_contains:
def __contains__(self, key):
return self.d.__contains__(key)
d = DictLike({1: 2})
result = inference.is_dict_like(d)
expected = has_keys and has_getitem and has_contains
assert result is expected
def test_is_file_like(mock):
class MockFile(object):
pass
is_file = inference.is_file_like
data = StringIO("data")
assert is_file(data)
# No read / write attributes
# No iterator attributes
m = MockFile()
assert not is_file(m)
MockFile.write = lambda self: 0
# Write attribute but not an iterator
m = MockFile()
assert not is_file(m)
# gh-16530: Valid iterator just means we have the
# __iter__ attribute for our purposes.
MockFile.__iter__ = lambda self: self
# Valid write-only file
m = MockFile()
assert is_file(m)
del MockFile.write
MockFile.read = lambda self: 0
# Valid read-only file
m = MockFile()
assert is_file(m)
# Iterator but no read / write attributes
data = [1, 2, 3]
assert not is_file(data)
assert not is_file(mock.Mock())
@pytest.mark.parametrize(
"ll", [collections.namedtuple('Test', list('abc'))(1, 2, 3)])
def test_is_names_tuple_passes(ll):
assert inference.is_named_tuple(ll)
@pytest.mark.parametrize(
"ll", [(1, 2, 3), 'a', Series({'pi': 3.14})])
def test_is_names_tuple_fails(ll):
assert not inference.is_named_tuple(ll)
def test_is_hashable():
# all new-style classes are hashable by default
class HashableClass(object):
pass
class UnhashableClass1(object):
__hash__ = None
class UnhashableClass2(object):
def __hash__(self):
raise TypeError("Not hashable")
hashable = (1,
3.14,
np.float64(3.14),
'a',
tuple(),
(1, ),
HashableClass(), )
not_hashable = ([], UnhashableClass1(), )
abc_hashable_not_really_hashable = (([], ), UnhashableClass2(), )
for i in hashable:
assert inference.is_hashable(i)
for i in not_hashable:
assert not inference.is_hashable(i)
for i in abc_hashable_not_really_hashable:
assert not inference.is_hashable(i)
# numpy.array is no longer collections.Hashable as of
# https://github.com/numpy/numpy/pull/5326, just test
# is_hashable()
assert not inference.is_hashable(np.array([]))
# old-style classes in Python 2 don't appear hashable to
# collections.Hashable but also seem to support hash() by default
if PY2:
class OldStyleClass():
pass
c = OldStyleClass()
assert not isinstance(c, compat.Hashable)
assert inference.is_hashable(c)
hash(c) # this will not raise
@pytest.mark.parametrize(
"ll", [re.compile('ad')])
def test_is_re_passes(ll):
assert inference.is_re(ll)
@pytest.mark.parametrize(
"ll", ['x', 2, 3, object()])
def test_is_re_fails(ll):
assert not inference.is_re(ll)
@pytest.mark.parametrize(
"ll", [r'a', u('x'),
r'asdf',
re.compile('adsf'),
u(r'\u2233\s*'),
re.compile(r'')])
def test_is_recompilable_passes(ll):
assert inference.is_re_compilable(ll)
@pytest.mark.parametrize(
"ll", [1, [], object()])
def test_is_recompilable_fails(ll):
assert not inference.is_re_compilable(ll)
class TestInference(object):
def test_infer_dtype_bytes(self):
compare = 'string' if PY2 else 'bytes'
# string array of bytes
arr = np.array(list('abc'), dtype='S1')
assert lib.infer_dtype(arr) == compare
# object array of bytes
arr = arr.astype(object)
assert lib.infer_dtype(arr) == compare
# object array of bytes with missing values
assert lib.infer_dtype([b'a', np.nan, b'c'], skipna=True) == compare
def test_isinf_scalar(self):
# GH 11352
assert libmissing.isposinf_scalar(float('inf'))
assert libmissing.isposinf_scalar(np.inf)
assert not libmissing.isposinf_scalar(-np.inf)
assert not libmissing.isposinf_scalar(1)
assert not libmissing.isposinf_scalar('a')
assert libmissing.isneginf_scalar(float('-inf'))
assert libmissing.isneginf_scalar(-np.inf)
assert not libmissing.isneginf_scalar(np.inf)
assert not libmissing.isneginf_scalar(1)
assert not libmissing.isneginf_scalar('a')
def test_maybe_convert_numeric_infinities(self):
# see gh-13274
infinities = ['inf', 'inF', 'iNf', 'Inf',
'iNF', 'InF', 'INf', 'INF']
na_values = {'', 'NULL', 'nan'}
pos = np.array(['inf'], dtype=np.float64)
neg = np.array(['-inf'], dtype=np.float64)
msg = "Unable to parse string"
for infinity in infinities:
for maybe_int in (True, False):
out = lib.maybe_convert_numeric(
np.array([infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
out = lib.maybe_convert_numeric(
np.array(['-' + infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, neg)
out = lib.maybe_convert_numeric(
np.array([u(infinity)], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
out = lib.maybe_convert_numeric(
np.array(['+' + infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
# too many characters
with pytest.raises(ValueError, match=msg):
lib.maybe_convert_numeric(
np.array(['foo_' + infinity], dtype=object),
na_values, maybe_int)
def test_maybe_convert_numeric_post_floatify_nan(self, coerce):
# see gh-13314
data = np.array(['1.200', '-999.000', '4.500'], dtype=object)
expected = np.array([1.2, np.nan, 4.5], dtype=np.float64)
nan_values = {-999, -999.0}
out = lib.maybe_convert_numeric(data, nan_values, coerce)
tm.assert_numpy_array_equal(out, expected)
def test_convert_infs(self):
arr = np.array(['inf', 'inf', 'inf'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False)
assert result.dtype == np.float64
arr = np.array(['-inf', '-inf', '-inf'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False)
assert result.dtype == np.float64
def test_scientific_no_exponent(self):
# See PR 12215
arr = np.array(['42E', '2E', '99e', '6e'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False, True)
assert np.all(np.isnan(result))
def test_convert_non_hashable(self):
# GH13324
        # make sure that we are handling non-hashables
arr = np.array([[10.0, 2], 1.0, 'apple'])
result = lib.maybe_convert_numeric(arr, set(), False, True)
tm.assert_numpy_array_equal(result, np.array([np.nan, 1.0, np.nan]))
def test_convert_numeric_uint64(self):
arr = np.array([2**63], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
arr = np.array([str(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
arr = np.array([np.uint64(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
@pytest.mark.parametrize("arr", [
np.array([2**63, np.nan], dtype=object),
np.array([str(2**63), np.nan], dtype=object),
np.array([np.nan, 2**63], dtype=object),
np.array([np.nan, str(2**63)], dtype=object)])
def test_convert_numeric_uint64_nan(self, coerce, arr):
expected = arr.astype(float) if coerce else arr.copy()
result = lib.maybe_convert_numeric(arr, set(),
coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
def test_convert_numeric_uint64_nan_values(self, coerce):
arr = np.array([2**63, 2**63 + 1], dtype=object)
na_values = {2**63}
expected = (np.array([np.nan, 2**63 + 1], dtype=float)
if coerce else arr.copy())
result = lib.maybe_convert_numeric(arr, na_values,
coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize("case", [
np.array([2**63, -1], dtype=object),
np.array([str(2**63), -1], dtype=object),
np.array([str(2**63), str(-1)], dtype=object),
np.array([-1, 2**63], dtype=object),
np.array([-1, str(2**63)], dtype=object),
np.array([str(-1), str(2**63)], dtype=object)])
def test_convert_numeric_int64_uint64(self, case, coerce):
expected = case.astype(float) if coerce else case.copy()
result = lib.maybe_convert_numeric(case, set(), coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize("value", [-2**63 - 1, 2**64])
def test_convert_int_overflow(self, value):
# see gh-18584
arr = np.array([value], dtype=object)
result = lib.maybe_convert_objects(arr)
tm.assert_numpy_array_equal(arr, result)
def test_maybe_convert_objects_uint64(self):
# see gh-4471
arr = np.array([2**63], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
# NumPy bug: can't compare uint64 to int64, as that
# results in both casting to float64, so we should
# make sure that this function is robust against it
arr = np.array([np.uint64(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
arr = np.array([2, -1], dtype=object)
exp = np.array([2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
arr = np.array([2**63, -1], dtype=object)
exp = np.array([2**63, -1], dtype=object)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
def test_mixed_dtypes_remain_object_array(self):
# GH14956
array = np.array([datetime(2015, 1, 1, tzinfo=pytz.utc), 1],
dtype=object)
result = lib.maybe_convert_objects(array, convert_datetime=1)
tm.assert_numpy_array_equal(result, array)
class TestTypeInference(object):
# Dummy class used for testing with Python objects
class Dummy():
pass
def test_inferred_dtype_fixture(self, any_skipna_inferred_dtype):
# see pandas/conftest.py
inferred_dtype, values = any_skipna_inferred_dtype
# make sure the inferred dtype of the fixture is as requested
assert inferred_dtype == lib.infer_dtype(values, skipna=True)
def test_length_zero(self):
result = lib.infer_dtype(np.array([], dtype='i4'))
assert result == 'integer'
result = lib.infer_dtype([])
assert result == 'empty'
# GH 18004
arr = np.array([np.array([], dtype=object),
np.array([], dtype=object)])
result = lib.infer_dtype(arr)
assert result == 'empty'
def test_integers(self):
arr = np.array([1, 2, 3, np.int64(4), np.int32(5)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'integer'
arr = np.array([1, 2, 3, np.int64(4), np.int32(5), 'foo'], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed-integer'
arr = np.array([1, 2, 3, 4, 5], dtype='i4')
result = lib.infer_dtype(arr)
assert result == 'integer'
def test_bools(self):
arr = np.array([True, False, True, True, True], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'boolean'
arr = np.array([np.bool_(True), np.bool_(False)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'boolean'
arr = np.array([True, False, True, 'foo'], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed'
arr = np.array([True, False, True], dtype=bool)
result = lib.infer_dtype(arr)
assert result == 'boolean'
arr = np.array([True, np.nan, False], dtype='O')
result = lib.infer_dtype(arr, skipna=True)
assert result == 'boolean'
def test_floats(self):
arr = np.array([1., 2., 3., np.float64(4), np.float32(5)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'floating'
arr = np.array([1, 2, 3, np.float64(4), np.float32(5), 'foo'],
dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed-integer'
arr = np.array([1, 2, 3, 4, 5], dtype='f4')
result = lib.infer_dtype(arr)
assert result == 'floating'
arr = np.array([1, 2, 3, 4, 5], dtype='f8')
result = lib.infer_dtype(arr)
assert result == 'floating'
def test_decimals(self):
# GH15690
arr = np.array([Decimal(1), Decimal(2), Decimal(3)])
result = lib.infer_dtype(arr)
assert result == 'decimal'
arr = np.array([1.0, 2.0, Decimal(3)])
result = lib.infer_dtype(arr)
assert result == 'mixed'
arr = np.array([Decimal(1), Decimal('NaN'), Decimal(3)])
result = lib.infer_dtype(arr)
assert result == 'decimal'
arr = np.array([Decimal(1), np.nan, Decimal(3)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'decimal'
def test_string(self):
pass
def test_unicode(self):
arr = [u'a', np.nan, u'c']
result = lib.infer_dtype(arr)
assert result == 'mixed'
arr = [u'a', np.nan, u'c']
result = lib.infer_dtype(arr, skipna=True)
expected = 'unicode' if PY2 else 'string'
assert result == expected
@pytest.mark.parametrize('dtype, missing, skipna, expected', [
(float, np.nan, False, 'floating'),
(float, np.nan, True, 'floating'),
(object, np.nan, False, 'floating'),
(object, np.nan, True, 'empty'),
(object, None, False, 'mixed'),
(object, None, True, 'empty')
])
@pytest.mark.parametrize('box', [pd.Series, np.array])
def test_object_empty(self, box, missing, dtype, skipna, expected):
# GH 23421
arr = box([missing, missing], dtype=dtype)
result = lib.infer_dtype(arr, skipna=skipna)
assert result == expected
def test_datetime(self):
dates = [datetime(2012, 1, x) for x in range(1, 20)]
index = Index(dates)
assert index.inferred_type == 'datetime64'
def test_infer_dtype_datetime(self):
arr = np.array([Timestamp('2011-01-01'),
Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([np.datetime64('2011-01-01'),
np.datetime64('2011-01-01')], dtype=object)
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([datetime(2011, 1, 1), datetime(2012, 2, 1)])
assert lib.infer_dtype(arr) == 'datetime'
# starts with nan
for n in [pd.NaT, np.nan]:
arr = np.array([n, pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([n, np.datetime64('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([n, datetime(2011, 1, 1)])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([n, pd.Timestamp('2011-01-02'), n])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([n, np.datetime64('2011-01-02'), n])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([n, datetime(2011, 1, 1), n])
assert lib.infer_dtype(arr) == 'datetime'
# different type of nat
arr = np.array([np.timedelta64('nat'),
np.datetime64('2011-01-02')], dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.datetime64('2011-01-02'),
np.timedelta64('nat')], dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
# mixed datetime
arr = np.array([datetime(2011, 1, 1),
pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime'
# should be datetime?
arr = np.array([np.datetime64('2011-01-01'),
pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([pd.Timestamp('2011-01-02'),
np.datetime64('2011-01-01')])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.nan, pd.Timestamp('2011-01-02'), 1])
assert lib.infer_dtype(arr) == 'mixed-integer'
arr = np.array([np.nan, pd.Timestamp('2011-01-02'), 1.1])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.nan, '2011-01-01', pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'mixed'
def test_infer_dtype_timedelta(self):
arr = np.array([pd.Timedelta('1 days'),
pd.Timedelta('2 days')])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([np.timedelta64(1, 'D'),
np.timedelta64(2, 'D')], dtype=object)
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([timedelta(1), timedelta(2)])
assert lib.infer_dtype(arr) == 'timedelta'
# starts with nan
for n in [pd.NaT, np.nan]:
arr = np.array([n, Timedelta('1 days')])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, np.timedelta64(1, 'D')])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, timedelta(1)])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, pd.Timedelta('1 days'), n])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, np.timedelta64(1, 'D'), n])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, timedelta(1), n])
assert lib.infer_dtype(arr) == 'timedelta'
# different type of nat
arr = np.array([np.datetime64('nat'), np.timedelta64(1, 'D')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.timedelta64(1, 'D'), np.datetime64('nat')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
def test_infer_dtype_period(self):
# GH 13664
arr = np.array([pd.Period('2011-01', freq='D'),
pd.Period('2011-02', freq='D')])
assert lib.infer_dtype(arr) == 'period'
arr = np.array([pd.Period('2011-01', freq='D'),
pd.Period('2011-02', freq='M')])
assert lib.infer_dtype(arr) == 'period'
# starts with nan
for n in [pd.NaT, np.nan]:
arr = np.array([n, pd.Period('2011-01', freq='D')])
assert lib.infer_dtype(arr) == 'period'
arr = np.array([n, pd.Period('2011-01', freq='D'), n])
assert lib.infer_dtype(arr) == 'period'
# different type of nat
arr = np.array([np.datetime64('nat'), pd.Period('2011-01', freq='M')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([pd.Period('2011-01', freq='M'), np.datetime64('nat')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
@pytest.mark.parametrize(
"data",
[
[datetime(2017, 6, 12, 19, 30), datetime(2017, 3, 11, 1, 15)],
[Timestamp("20170612"), Timestamp("20170311")],
[Timestamp("20170612", tz='US/Eastern'),
Timestamp("20170311", tz='US/Eastern')],
[date(2017, 6, 12),
Timestamp("20170311", tz='US/Eastern')],
[np.datetime64("2017-06-12"), np.datetime64("2017-03-11")],
[np.datetime64("2017-06-12"), datetime(2017, 3, 11, 1, 15)]
]
)
def test_infer_datetimelike_array_datetime(self, data):
assert lib.infer_datetimelike_array(data) == "datetime"
@pytest.mark.parametrize(
"data",
[
[timedelta(2017, 6, 12), timedelta(2017, 3, 11)],
[timedelta(2017, 6, 12), date(2017, 3, 11)],
[np.timedelta64(2017, "D"), np.timedelta64(6, "s")],
[np.timedelta64(2017, "D"), timedelta(2017, 3, 11)]
]
)
def test_infer_datetimelike_array_timedelta(self, data):
assert lib.infer_datetimelike_array(data) == "timedelta"
def test_infer_datetimelike_array_date(self):
arr = [date(2017, 6, 12), date(2017, 3, 11)]
assert lib.infer_datetimelike_array(arr) == "date"
@pytest.mark.parametrize(
"data",
[
["2017-06-12", "2017-03-11"],
[20170612, 20170311],
[20170612.5, 20170311.8],
[Dummy(), Dummy()],
[Timestamp("20170612"), Timestamp("20170311", tz='US/Eastern')],
[Timestamp("20170612"), 20170311],
[timedelta(2017, 6, 12), Timestamp("20170311", tz='US/Eastern')]
]
)
def test_infer_datetimelike_array_mixed(self, data):
assert lib.infer_datetimelike_array(data) == "mixed"
@pytest.mark.parametrize(
"first, expected",
[
[[None], "mixed"],
[[np.nan], "mixed"],
[[pd.NaT], "nat"],
[[datetime(2017, 6, 12, 19, 30), pd.NaT], "datetime"],
[[np.datetime64("2017-06-12"), pd.NaT], "datetime"],
[[date(2017, 6, 12), pd.NaT], "date"],
[[timedelta(2017, 6, 12), pd.NaT], "timedelta"],
[[np.timedelta64(2017, "D"), pd.NaT], "timedelta"]
]
)
@pytest.mark.parametrize("second", [None, np.nan])
def test_infer_datetimelike_array_nan_nat_like(self, first, second,
expected):
first.append(second)
assert lib.infer_datetimelike_array(first) == expected
def test_infer_dtype_all_nan_nat_like(self):
arr = np.array([np.nan, np.nan])
assert lib.infer_dtype(arr) == 'floating'
        # a mix of nan and None results in 'mixed'
arr = np.array([np.nan, np.nan, None])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([None, np.nan, np.nan])
assert lib.infer_dtype(arr) == 'mixed'
# pd.NaT
arr = np.array([pd.NaT])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([pd.NaT, np.nan])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([np.nan, pd.NaT])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([np.nan, pd.NaT, np.nan])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([None, pd.NaT, None])
assert lib.infer_dtype(arr) == 'datetime'
# np.datetime64(nat)
arr = np.array([np.datetime64('nat')])
assert lib.infer_dtype(arr) == 'datetime64'
for n in [np.nan, pd.NaT, None]:
arr = np.array([n, np.datetime64('nat'), n])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([pd.NaT, n, np.datetime64('nat'), n])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([np.timedelta64('nat')], dtype=object)
assert lib.infer_dtype(arr) == 'timedelta'
for n in [np.nan, pd.NaT, None]:
arr = np.array([n, np.timedelta64('nat'), n])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([pd.NaT, n, np.timedelta64('nat'), n])
assert lib.infer_dtype(arr) == 'timedelta'
# datetime / timedelta mixed
arr = np.array([pd.NaT, np.datetime64('nat'),
np.timedelta64('nat'), np.nan])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.timedelta64('nat'), np.datetime64('nat')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
def test_is_datetimelike_array_all_nan_nat_like(self):
arr = np.array([np.nan, pd.NaT, np.datetime64('nat')])
assert lib.is_datetime_array(arr)
assert lib.is_datetime64_array(arr)
assert not lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, pd.NaT, np.timedelta64('nat')])
assert not lib.is_datetime_array(arr)
assert not lib.is_datetime64_array(arr)
assert lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, pd.NaT, np.datetime64('nat'),
np.timedelta64('nat')])
assert not lib.is_datetime_array(arr)
assert not lib.is_datetime64_array(arr)
assert not lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, pd.NaT])
assert lib.is_datetime_array(arr)
assert lib.is_datetime64_array(arr)
assert lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, np.nan], dtype=object)
assert not lib.is_datetime_array(arr)
assert not lib.is_datetime64_array(arr)
assert not lib.is_timedelta_or_timedelta64_array(arr)
assert lib.is_datetime_with_singletz_array(
np.array([pd.Timestamp('20130101', tz='US/Eastern'),
pd.Timestamp('20130102', tz='US/Eastern')],
dtype=object))
assert not lib.is_datetime_with_singletz_array(
np.array([pd.Timestamp('20130101', tz='US/Eastern'),
pd.Timestamp('20130102', tz='CET')],
dtype=object))
@pytest.mark.parametrize(
"func",
[
'is_datetime_array',
'is_datetime64_array',
'is_bool_array',
'is_timedelta_or_timedelta64_array',
'is_date_array',
'is_time_array',
'is_interval_array',
'is_period_array'])
def test_other_dtypes_for_array(self, func):
func = getattr(lib, func)
arr = np.array(['foo', 'bar'])
assert not func(arr)
arr = np.array([1, 2])
assert not func(arr)
def test_date(self):
dates = [date(2012, 1, day) for day in range(1, 20)]
index = Index(dates)
assert index.inferred_type == 'date'
dates = [date(2012, 1, day) for day in range(1, 20)] + [np.nan]
result = lib.infer_dtype(dates)
assert result == 'mixed'
result = lib.infer_dtype(dates, skipna=True)
assert result == 'date'
def test_is_numeric_array(self):
assert lib.is_float_array(np.array([1, 2.0]))
assert lib.is_float_array(np.array([1, 2.0, np.nan]))
assert not lib.is_float_array(np.array([1, 2]))
assert lib.is_integer_array(np.array([1, 2]))
assert not lib.is_integer_array(np.array([1, 2.0]))
def test_is_string_array(self):
assert lib.is_string_array(np.array(['foo', 'bar']))
assert not lib.is_string_array(
np.array(['foo', 'bar', np.nan], dtype=object), skipna=False)
assert lib.is_string_array(
np.array(['foo', 'bar', np.nan], dtype=object), skipna=True)
assert not lib.is_string_array(np.array([1, 2]))
def test_to_object_array_tuples(self):
r = (5, 6)
values = [r]
result = lib.to_object_array_tuples(values)
try:
# make sure record array works
from collections import namedtuple
record = namedtuple('record', 'x y')
r = record(5, 6)
values = [r]
result = lib.to_object_array_tuples(values) # noqa
except ImportError:
pass
def test_object(self):
# GH 7431
# cannot infer more than this as only a single element
arr = np.array([None], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed'
def test_to_object_array_width(self):
# see gh-13320
rows = [[1, 2, 3], [4, 5, 6]]
expected = np.array(rows, dtype=object)
out = lib.to_object_array(rows)
tm.assert_numpy_array_equal(out, expected)
expected = np.array(rows, dtype=object)
out = lib.to_object_array(rows, min_width=1)
tm.assert_numpy_array_equal(out, expected)
expected = np.array([[1, 2, 3, None, None],
[4, 5, 6, None, None]], dtype=object)
out = lib.to_object_array(rows, min_width=5)
tm.assert_numpy_array_equal(out, expected)
def test_is_period(self):
assert lib.is_period(pd.Period('2011-01', freq='M'))
assert not lib.is_period(pd.PeriodIndex(['2011-01'], freq='M'))
assert not lib.is_period(pd.Timestamp('2011-01'))
assert not lib.is_period(1)
assert not lib.is_period(np.nan)
def test_categorical(self):
# GH 8974
from pandas import Categorical, Series
arr = Categorical(list('abc'))
result = lib.infer_dtype(arr)
assert result == 'categorical'
result = lib.infer_dtype(Series(arr))
assert result == 'categorical'
arr = Categorical(list('abc'), categories=['cegfab'], ordered=True)
result = lib.infer_dtype(arr)
assert result == 'categorical'
result = lib.infer_dtype(Series(arr))
assert result == 'categorical'
class TestNumberScalar(object):
def test_is_number(self):
assert is_number(True)
assert is_number(1)
assert is_number(1.1)
assert is_number(1 + 3j)
assert is_number(np.bool(False))
assert is_number(np.int64(1))
assert is_number(np.float64(1.1))
assert is_number(np.complex128(1 + 3j))
assert is_number(np.nan)
assert not is_number(None)
assert not is_number('x')
assert not is_number(datetime(2011, 1, 1))
assert not is_number(np.datetime64('2011-01-01'))
assert not is_number(Timestamp('2011-01-01'))
assert not is_number(Timestamp('2011-01-01', tz='US/Eastern'))
assert not is_number(timedelta(1000))
assert not is_number(Timedelta('1 days'))
# questionable
assert not is_number(np.bool_(False))
assert is_number(np.timedelta64(1, 'D'))
def test_is_bool(self):
assert is_bool(True)
assert is_bool(np.bool(False))
assert is_bool(np.bool_(False))
assert not is_bool(1)
assert not is_bool(1.1)
assert not is_bool(1 + 3j)
assert not is_bool(np.int64(1))
assert not is_bool(np.float64(1.1))
assert not is_bool(np.complex128(1 + 3j))
assert not is_bool(np.nan)
assert not is_bool(None)
assert not is_bool('x')
assert not is_bool(datetime(2011, 1, 1))
assert not is_bool(np.datetime64('2011-01-01'))
assert not is_bool(Timestamp('2011-01-01'))
assert not is_bool(Timestamp('2011-01-01', tz='US/Eastern'))
assert not is_bool(timedelta(1000))
assert not is_bool(np.timedelta64(1, 'D'))
assert not is_bool(Timedelta('1 days'))
def test_is_integer(self):
assert is_integer(1)
assert is_integer(np.int64(1))
assert not is_integer(True)
assert not is_integer(1.1)
assert not is_integer(1 + 3j)
assert not is_integer(np.bool(False))
assert not is_integer(np.bool_(False))
assert not is_integer(np.float64(1.1))
assert not is_integer(np.complex128(1 + 3j))
assert not is_integer(np.nan)
assert not is_integer(None)
assert not is_integer('x')
assert not is_integer(datetime(2011, 1, 1))
assert not is_integer(np.datetime64('2011-01-01'))
assert not is_integer(Timestamp('2011-01-01'))
assert not is_integer(Timestamp('2011-01-01', tz='US/Eastern'))
assert not is_integer(timedelta(1000))
assert not is_integer(Timedelta('1 days'))
# questionable
assert is_integer(np.timedelta64(1, 'D'))
def test_is_float(self):
assert | is_float(1.1) | pandas.core.dtypes.common.is_float |
import sys
import pandas as pd
from treasureisland.dna_sequence import sequence
def flatten_result(pred):
flat_result = []
for org in pred.keys():
org_result = pred[org]
flat_result.extend(org_result)
return flat_result
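# e.g. flatten_result({'orgA': [row_a1, row_a2], 'orgB': [row_b1]})
# returns [row_a1, row_a2, row_b1] (illustrative names only)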
def main(seqfile):
seq = sequence(seqfile)
pred = seq.predict()
flat_pred = flatten_result(pred)
df = | pd.DataFrame(flat_pred, columns=['accession', 'start', 'end', 'probability']) | pandas.DataFrame |
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for proteinfer.test_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
import pandas as pd
import test_util
class TestUtilTest(parameterized.TestCase):
@parameterized.named_parameters(
dict(
testcase_name='empty df',
df1=pd.DataFrame(),
df2=pd.DataFrame(),
),
dict(
testcase_name='one column, ints',
df1= | pd.DataFrame({'col1': [1, 2, 3]}) | pandas.DataFrame |
import sqlite3
import uuid
import numpy as np
import pandas as pd
import time
import sys
import ast
import os
import re
from random import shuffle as shuffle_list
from datetime import datetime
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
#from tensorflow import set_random_seed #valid for tensorflow1
from tensorflow import random #valid for tensorflow2
from sklearn.metrics import mean_squared_error
from keras.preprocessing.sequence import TimeseriesGenerator
from keras.models import model_from_json
import preprocessing.config as cfg
sns.set() # nicer graphics
def measure_time(func):
"""time measuring decorator"""
def wrapped(*args, **kwargs):
start_time = time.time()
ret = func(*args, **kwargs)
end_time = time.time()
print('took {:.3} seconds'.format(end_time-start_time))
return ret
return wrapped
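# Hypothetical usage sketch of the decorator above; `run_training` is a
# made-up placeholder, not a function of this module:
#
#     @measure_time
#     def run_training():
#         ...
#
#     run_training()  # prints "took ... seconds" and returns the wrapped result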
def get_available_gpus():
from tensorflow.python.client import device_lib
local_device_protos = device_lib.list_local_devices()
return [x.name for x in local_device_protos if x.device_type == 'GPU']
class LoadprofileGenerator(TimeseriesGenerator):
"""This is a customized version of keras TimeseriesGenerator. Its
intention is to neglect strides and sampling rates but to incorporate
iteration through several timeseries or loadprofiles of arbitrary length."""
def __init__(self, data, targets, length, start_index=0,
shuffle=False, reverse=False, batch_size=128):
super().__init__(data, targets, length, start_index=start_index,
shuffle=shuffle, reverse=reverse,
end_index=len(data[0]), batch_size=batch_size)
assert isinstance(data, list), 'data must be list of timeseries'
if any(isinstance(i, pd.DataFrame) for i in self.data):
self.data = [i.values for i in self.data]
if any(isinstance(i, pd.DataFrame) for i in self.targets):
self.targets = [i.values for i in self.targets]
if self.shuffle:
zippd = list(zip(self.data, self.targets))
shuffle_list(zippd) # inplace operation
self.data, self.targets = list(zip(*zippd))
# start index is the same for each profile
# for each profile there's a different end_index
self.end_index = [len(d)-1 for d in self.data]
batches_per_profile = [(e - self.start_index + self.batch_size)//
self.batch_size for e in self.end_index]
self.data_len = sum(batches_per_profile)
self.batch_cumsum = np.cumsum(batches_per_profile)
def __len__(self):
return self.data_len
def _empty_batch(self, num_rows):
# shape of first profile suffices
samples_shape = [num_rows, self.length]
samples_shape.extend(self.data[0].shape[1:])
targets_shape = [num_rows]
targets_shape.extend(self.targets[0].shape[1:])
return np.empty(samples_shape), np.empty(targets_shape)
def __getitem__(self, index):
# index is the enumerated batch index starting at 0
# find corresponding profile
p_idx = np.nonzero(index < self.batch_cumsum)[0][0]
prev_sum = 0 if p_idx == 0 else self.batch_cumsum[p_idx-1]
if self.shuffle:
rows = np.random.randint(
self.start_index, self.end_index[p_idx] + 1,
size=self.batch_size)
else:
i = self.start_index + self.batch_size * (index - prev_sum)
rows = np.arange(i, min(i + self.batch_size,
self.end_index[p_idx] + 2))
# +2 to get the last element, too
samples, targets = self._empty_batch(len(rows))
for j, row in enumerate(rows):
indices = range(row - self.length, row)
samples[j] = self.data[p_idx][indices]
targets[j] = self.targets[p_idx][row-1]
if self.reverse:
return samples[:, ::-1, ...], targets
return samples, targets
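# Illustrative sketch (not part of the original module): one way the generator
# above could be fed. Profile lengths, feature counts, window length and the
# helper name are made-up example values.
def _loadprofile_generator_example():
    profiles_x = [pd.DataFrame(np.random.randn(500, 3)),
                  pd.DataFrame(np.random.randn(320, 3))]
    profiles_y = [pd.DataFrame(np.random.randn(500, 1)),
                  pd.DataFrame(np.random.randn(320, 1))]
    # start_index equals the window length so every sample is a full window
    gen = LoadprofileGenerator(profiles_x, profiles_y, length=32,
                               start_index=32, batch_size=64)
    x_batch, y_batch = gen[0]  # shapes (64, 32, 3) and (64, 1)
    return len(gen), x_batch.shape, y_batch.shape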
class Report:
"""Summary of an experiment/trial"""
TARGET_SCHEME = cfg.data_cfg['db_target_scheme']
TABLE_SCHEMES = \
{'predictions': ['id text', 'idx int'] +
['{} real' for _ in range(len(TARGET_SCHEME))] +
['{} real' for _ in range(len(TARGET_SCHEME))],
'meta_experiments': ['id text', 'target text', 'testset text',
'score real', 'loss_metric text', 'seed text',
'scriptname text', 'start_time text',
'end_time text', 'config text']
}
def __init__(self, uid, seed,
score=None, yhat=None, actual=None, history=None,
used_loss=None, model=None):
self.score = score
self.yhat_te = yhat
self.actual = actual
self.history = history
self.uid = uid
self.seed = seed
self.yhat_tr = None
self.start_time = datetime.now().strftime("%Y-%m-%d %H:%M")
self.used_loss = used_loss
self.model = model
self.cfg_blob = {}
def save_to_db(self):
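        # Writes one row per test-set sample into the `predictions` table
        # (experiment id, sample index, predicted targets and ground-truth
        # columns suffixed with `_gtruth`, zero-padded for targets not used in
        # this experiment) plus a single summary row into `meta_experiments`.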
if cfg.data_cfg['save_predictions'] and not cfg.debug_cfg['DEBUG']:
cols = self.yhat_te.columns.tolist()
assert all(t in self.TARGET_SCHEME for t in cols), \
'config has unknown target specified'
# fill up missing targets up to TARGET_SCHEME
df_to_db = self.yhat_te.copy()
df_to_db = df_to_db.assign(**{t: 0 for t in self.TARGET_SCHEME
if t not in cols})
df_to_db = df_to_db.loc[:, self.TARGET_SCHEME] # reorder cols
gtruth_to_db = self.actual.copy()
gtruth_to_db = gtruth_to_db.assign(**{t: 0 for t in
self.TARGET_SCHEME
if t not in cols})
gtruth_to_db = gtruth_to_db.loc[:, self.TARGET_SCHEME]\
.rename(columns={t:t+'_gtruth' for t in gtruth_to_db.columns})
df_to_db = pd.concat([df_to_db, gtruth_to_db], axis=1)
with sqlite3.connect(cfg.data_cfg['db_path']) as con:
# predictions
table_name = 'predictions'
table_scheme = self.TABLE_SCHEMES[table_name]
query = "CREATE TABLE IF NOT EXISTS " + \
"{}{}".format(table_name, tuple(table_scheme))\
.replace("'", "")
query = query.format(*df_to_db.columns)
con.execute(query)
df_to_db['id'] = self.uid
df_to_db['idx'] = self.yhat_te.index
entries = [tuple(x) for x in np.roll(df_to_db.values,
shift=2, axis=1)]
query = f'INSERT INTO {table_name} ' + \
'VALUES ({})'.format(
', '.join('?' * len(df_to_db.columns)))
con.executemany(query, entries)
# meta experiments
table_name = 'meta_experiments'
table_scheme = self.TABLE_SCHEMES[table_name]
query = "CREATE TABLE IF NOT EXISTS " + \
"{}{}".format(table_name, tuple(table_scheme))\
.replace("'", "")
con.execute(query)
config_blob = {**cfg.data_cfg, **cfg.keras_cfg, **cfg.lgbm_cfg}
if hasattr(self.model, 'sk_params'):
config_blob['sk_params'] = self.model.sk_params
entry = (self.uid,
str(cfg.data_cfg['Target_param_names']),
str(cfg.data_cfg['testset']),
str(self.score),
cfg.data_cfg['loss'],
str(self.seed),
os.path.basename(sys.argv[0]),
self.start_time,
datetime.now().strftime("%Y-%m-%d %H:%M"),
str(config_blob),
)
query = f'INSERT INTO {table_name} VALUES {entry}'
con.execute(query)
print(f'Predictions and meta of model with uuid {self.uid} '
f'saved to db.')
def save_model(self):
if not cfg.debug_cfg['DEBUG'] and self.model is not None:
self.model.save(self.uid)
print(f'Model arch and weights dumped for {self.uid}.')
def load_model(self):
path = os.path.join(cfg.data_cfg['model_dump_path'],
self.uid+'_arch.json')
with open(path, 'r') as f:
self.model = model_from_json(f.read())
self.model.compile(optimizer='adam', loss='mse')
self.model.load_weights(os.path.join(cfg.data_cfg['model_dump_path'],
self.uid+'_weights.h5'))
return self
@classmethod
def load(clf, uid, truncate_at=None):
"""Return a Report object from uid. Uid must exist in database."""
with sqlite3.connect(cfg.data_cfg['db_path']) as con:
query = """SELECT * FROM predictions WHERE id=?"""
pred_table = pd.read_sql_query(query, con, params=(uid,))
query = """SELECT * FROM meta_experiments WHERE id=?"""
meta_table = pd.read_sql_query(query, con, params=(uid,))
cfg.data_cfg['Target_param_names'] = \
ast.literal_eval(meta_table.target[0]) # str of list -> list
cfg.data_cfg['testset'] = ast.literal_eval(meta_table.testset[0])
target_cols = cfg.data_cfg['Target_param_names']
yhat = pred_table.loc[:, target_cols]
actual = pred_table.loc[:, [t+'_gtruth' for t in target_cols]]
actual = actual.rename(columns=lambda c: c.replace('_gtruth', ''))
score = meta_table.score[0]
seed = meta_table.seed[0]
used_loss = meta_table.loss_metric[0]
cfg_blob = meta_table.config[0]
if truncate_at is not None:
actual = actual.iloc[:truncate_at, :]
yhat = yhat.iloc[:truncate_at, :]
report = clf(uid, seed, score, yhat, actual, used_loss=used_loss)
        # replace the repr of the mse function object in the cfg blob with a
        # plain string so the blob can be eval'd back into a dict
        report.cfg_blob = eval(re.sub(r'<[A-Za-z0-9_]+(?:\s+[a-zA-Z0-9_]+)*>',
                                      "'mse'", cfg_blob))
try:
report.load_model()
except FileNotFoundError:
print(f'Couldnt load model {uid}. '
f'Weight or architecture file not found.')
return report
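    # e.g. Report.load('ab12cd') restores predictions, ground truth and, if the
    # dump files exist, the trained model for that experiment id ('ab12cd' is a
    # made-up id).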
def plot(self, show=True):
plt.figure()
linestyles = ['-', '--', ':', '-.']
col = 2
plot_row_idx = 1
if self.history is not None:
history = self.history.history
col += 1
plt.subplot(col, 1, plot_row_idx)
plt.plot(history['loss'], label='train loss')
plt.plot(history['val_loss'], label='validation loss')
plt.xlabel('epoch')
plt.ylabel(f'{self.used_loss} in K²')
plt.title(f'Training/Validation Score over Epochs of Experiment '
f'{self.uid}')
plt.legend()
plot_row_idx += 1
plt.subplot(col, 1, plot_row_idx)
plot_row_idx += 1
# plot performance on testset
for i, c in enumerate(self.actual):
plt.plot(self.actual[c], alpha=0.6, color='darkorange',
label='ground truth '+c,
linestyle=linestyles[i])
for i, c in enumerate(self.yhat_te):
plt.plot(self.yhat_te[c], lw=2, color='navy',
label='predicted '+c,
linestyle=linestyles[i])
plt.xlabel('time in s')
plt.ylabel('temperature in °C')
plt.title(f'Prediction and ground truth of experiment {self.uid}')
plt.legend()
plt.subplot(col, 1, plot_row_idx)
for i, c in enumerate(self.actual):
plt.plot(self.yhat_te[c] - self.actual[c], color='red',
label='prediction error ' + c,
linestyle=linestyles[i])
plt.xlabel('time in s')
plt.ylabel('temperature in K')
plt.title(f'Prediction Error of Experiment '
f'{self.uid}')
plt.legend()
# plot performance on trainset
if self.yhat_tr is not None:
y_tr, yhat_tr = self.yhat_tr
plt.figure()
plt.plot(y_tr, alpha=0.6, color='darkorange', label='ground truth')
plt.plot(yhat_tr, lw=2, color='navy', label='prediction')
if show:
plt.show()
def paper_1_plot_testset_performance(self):
sns.set_context('paper')
cols_to_plot = cfg.data_cfg['Target_param_names'] #['stator_winding']
#self.actual = self.actual.loc[30:, cols_to_plot]
#self.yhat_te = self.yhat_te.loc[30:, cols_to_plot]
def _format_plot():
plt.xlabel('time in h')
plt.ylabel('temperature in °C')
plt.legend()
plt.xlim(-1000, np.around(len(self.actual), -3) + 300)
tcks = np.arange(0, np.around(len(self.actual), -3), 7200)
plt.xticks(tcks, tcks // 7200)
sns.set_style('whitegrid')
plt.figure(figsize=(10, 3.5))
linestyles = ['-', '--', ':', '-.']
plt.subplot(1, 2, 1)
plt.title('Prediction and ground truth')
param_map = {'pm': '{PM}',
'stator_tooth': '{ST}',
'stator_yoke': '{SY}',
'stator_winding': '{SW}'}
for i, c in enumerate(self.actual):
plt.plot(self.actual[c], alpha=0.6, color='green',
label=r'$\theta_{}$'.format(param_map[c]),
linestyle=linestyles[i])
for i, c in enumerate(self.yhat_te):
plt.plot(self.yhat_te[c], lw=2, color='navy',
label=r'$\hat \theta_{}$'.format(param_map[c]),
linestyle=linestyles[i])
_format_plot()
plt.subplot(1, 2, 2)
plt.title('Prediction Error')
clrs = ['red', 'magenta', 'darkorange', 'yellow' ]
for i, c in enumerate(self.actual):
plt.plot(self.yhat_te[c] - self.actual[c], color=clrs[i],
label='prediction error ' +
r'$\theta_{}$'.format(param_map[c]),
#linestyle=linestyles[i]
)
_format_plot()
plt.show()
def presentation_plot_testset_performance(self, trunc=True):
sns.set_context('talk')
sns.set_style('whitegrid')
if trunc:
truncate_at = 40092
self.yhat_te = self.yhat_te.iloc[:truncate_at, :]
self.actual = self.actual.iloc[:truncate_at, :]
param_map = {'pm': '{PM}',
'stator_tooth': '{ST}',
'stator_yoke': '{SY}',
'stator_winding': '{SW}'}
n_targets = len(self.actual.columns)
plt.figure(figsize=(10, 1.5 * (n_targets)))
def _format_plot(y_lbl='temp', x_lbl=True, legend=True,
legend_loc='best'):
if x_lbl:
plt.xlabel('Time in h')
if y_lbl == 'temp':
plt.ylabel('Temperature in °C')
elif y_lbl == 'motor_speed':
plt.ylabel('Motor speed in 1/min')
elif y_lbl.startswith('i_'):
plt.ylabel('Current in A')
elif y_lbl in param_map:
plt.ylabel(r'$\theta_{}$ in °C'.format(param_map[y_lbl]))
if legend:
plt.legend(loc=legend_loc)
plt.xlim(-1000, np.around(len(self.actual), -3) + 300)
tcks = np.arange(0, np.around(len(self.actual), -3), 7200)
tcks_lbls = tcks // 7200 if x_lbl else []
plt.xticks(tcks, tcks_lbls)
for i, c in enumerate(self.actual):
diff = self.yhat_te[c] - self.actual[c]
ax = plt.subplot(n_targets, 2, 2 * i + 1)
if i == 0:
plt.title('Prediction and ground truth')
plt.plot(self.actual[c], color='green',
label=r'$\theta_{}$'.format(param_map[c]),
linestyle='-')
plt.plot(self.yhat_te[c], lw=2, color='navy',
label=r'$\hat \theta_{}$'.format(param_map[c]),
linestyle='-')
_format_plot(legend=False, x_lbl=i > 5, y_lbl=c)
plt.text(0.6, 0.9,
s=f'MSE: {(diff ** 2).mean():.2f} K²',
bbox={'facecolor': 'white'}, transform=ax.transAxes,
verticalalignment='top', horizontalalignment='center')
ax = plt.subplot(n_targets, 2, 2 * (i + 1))
if i == 0:
plt.title('Prediction error')
plt.plot(diff, color='red',
label='Prediction error ' +
r'$\theta_{}$'.format(param_map[c]))
_format_plot(x_lbl=i > 5, legend=False, y_lbl=c)
def paper_0_plot_testset_performance(self, testset_x, trunc=True):
sns.set_context('paper')
sns.set_style('whitegrid')
if trunc:
truncate_at = 40092
self.yhat_te = self.yhat_te.iloc[:truncate_at, :]
self.actual = self.actual.iloc[:truncate_at, :]
param_map = {'pm': '{PM}',
'stator_tooth': '{ST}',
'stator_yoke': '{SY}',
'stator_winding': '{SW}'}
input_param_map = {'motor_speed': 'Motor speed',
'coolant': 'Coolant temperature',
'i_q': 'q-Axis current',
'i_d': 'd-Axis current',
}
def _format_plot(y_lbl='temp', x_lbl=True, legend=True,
legend_loc='best'):
if x_lbl:
plt.xlabel('Time in h')
if y_lbl == 'temp':
plt.ylabel('Temperature in °C')
elif y_lbl == 'motor_speed':
plt.ylabel('Motor speed in 1/min')
elif y_lbl.startswith('i_'):
plt.ylabel('Current in A')
if legend:
plt.legend(loc=legend_loc)
plt.xlim(-1000, np.around(len(self.actual), -3) + 300)
tcks = np.arange(0, np.around(len(self.actual), -3), 7200)
tcks_lbls = tcks // 7200
plt.xticks(tcks, tcks_lbls)
n_targets = len(self.actual.columns)
plt.figure(figsize=(10, 1.5*(n_targets+2)))
for i, c in enumerate(self.actual):
diff = self.yhat_te[c] - self.actual[c]
ax = plt.subplot(n_targets + 2, 2, 2*i+1)
if i == 0:
plt.title('Prediction and ground truth')
plt.plot(self.actual[c], color='green',
label=r'$\theta_{}$'.format(param_map[c]),
linestyle='-')
plt.plot(self.yhat_te[c], lw=2, color='navy',
label=r'$\hat \theta_{}$'.format(param_map[c]),
linestyle='-')
_format_plot(x_lbl=False, legend_loc='lower right')
plt.text(0.6, 0.9,
s=f'MSE: {(diff**2).mean():.2f} K²',
bbox={'facecolor': 'white'}, transform=ax.transAxes,
verticalalignment='top', horizontalalignment='center')
ax = plt.subplot(n_targets + 2, 2, 2*(i+1))
if i == 0:
plt.title('Prediction error')
plt.plot(diff, color='red',
label='Prediction error ' +
r'$\theta_{}$'.format(param_map[c]))
_format_plot(x_lbl=False, legend_loc='lower center')
plt.text(0.5, 0.9,
bbox={'facecolor': 'white'}, transform=ax.transAxes,
s=r'$L_{\infty}$: '+f'{diff.abs().max():.2f} K',
verticalalignment='top', horizontalalignment='center')
for i, c in enumerate(input_param_map.keys()):
y_lbl = 'temp' if c in ['ambient', 'coolant'] else c
plt.subplot(n_targets + 2, 2, 2*(n_targets+2)-i)
plt.title(input_param_map[c])
plt.plot(testset_x[c], color='g')
_format_plot(legend=False, y_lbl=y_lbl, x_lbl=i < 2)
# plt.show()
def print(self):
print('')
print('#' * 20)
print("Trial Report")
print(f"Trial ID: {self.uid}")
print(f"{self.used_loss}: {self.score:.6} K²")
# todo: print all other attributes too
print('#' * 20)
class TrialReports:
"""Manages a list of reports"""
def __init__(self, seed=0, reports=None):
if reports is not None:
            assert isinstance(reports, list), 'reports must be a list'
self.reports = reports
else:
self.reports = []
self.seed = seed
self.start_time = datetime.now().strftime("%Y-%m-%d %H:%M")
self.end_time = '-1'
self.ensemble_score = -1.0
self.data_cache = {}
def __add__(self, report):
        assert isinstance(report, Report), 'can only add Report instances'
self.reports.append(report)
return self
def __str__(self):
self.print()
return '\n'
def get_scores(self):
return [r.score for r in self.reports]
def get_mean_score(self):
return np.mean(self.get_scores())
def get_uids(self):
return [r.uid for r in self.reports]
def get_predictions_te(self):
return [r.yhat_te for r in self.reports]
def conduct(self, n_trials):
"""Generator function to conduct trials sequentially."""
for i in range(n_trials):
trial_seed = self.seed + i
model_uuid = str(uuid.uuid4())[:6]
print('model uuid: {}, seed: {}'.format(model_uuid, trial_seed))
np.random.seed(trial_seed)
#set_random_seed(trial_seed) #valid for tensorflow1
random.set_seed(trial_seed) #valid for tensorflow2
report = Report(uid=model_uuid, seed=trial_seed)
yield report
report.save_to_db()
report.save_model()
self.reports.append(report)
# get ensemble performance
self.ensemble_score = \
mean_squared_error(self.reports[0].actual,
                               np.mean(np.dstack([r.yhat_te for r in
                                                  self.reports]), axis=2))
self.print()
def print(self, plot=True):
"""Print summary of trial performances.
Expects a list of dictionaries with details about the conducted trials.
If configured, best model will be plotted as well.
Confidence Interval of 95% for Metric Mean is constructed by
t-distribution instead of normal, since sample size is most of the time
< 30
"""
# summary statistic
scores = np.asarray(self.get_scores())
bmr = best_model_report = self.reports[np.argmin(scores)]
print('')
print('#'*20)
print(
f"""Performance Report
# trials: {len(scores)}
            mean MSE: {scores.mean():.6} K² +- {stats.t.ppf(0.975, len(scores) - 1) * scores.std(ddof=1) / np.sqrt(len(scores)):.3} K²
std MSE: {scores.std():.6} K²
Best Model: uuid {bmr.uid}; seed {bmr.seed}; score {bmr.score:.6} K²
Ensemble Score: {self.ensemble_score:.6} K²
"""
)
print('#'*20)
# plot best model performance
if cfg.plot_cfg['do_plot'] and plot:
try:
bmr.plot()
except Exception:
print("Plotting failed..")
@staticmethod
def conduct_step(model_func, seed, init_params, fit_params,
predict_params, inverse_params, dm):
model_uuid = str(uuid.uuid4())[:6]
print('model uuid: {}, seed: {}'.format(model_uuid, seed))
np.random.seed(seed)
#set_random_seed(seed) #valid for tensorflow1
random.set_seed(seed) #valid for tensorflow2
report = Report(uid=model_uuid, seed=seed)
model = model_func(**init_params)
report.history = model.fit(**fit_params)
report.model = model
if report.history.history.get('nan_output', False):
# training failed
report.actual = dm.inverse_transform(**inverse_params).iloc[:2, :]
report.yhat_te = report.actual.copy()
report.score = float(report.history.history['loss'][-1])
else:
# training successful
try:
report.yhat_te = model.predict(**predict_params)
report.actual = dm.inverse_transform(**inverse_params)
inverse_params_copy = inverse_params.copy() # important!
inverse_params_copy['df'] = report.yhat_te
report.yhat_te = dm.inverse_transform(**inverse_params_copy)
report.score = model.score(report.actual, report.yhat_te,
score_directly=True)
report.save_model()
except ValueError:
print('ValueError on this config:\n {}'.format({
**init_params, **fit_params, **predict_params}))
raise
report.save_to_db()
report.print()
return report
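# Hypothetical usage sketch of TrialReports.conduct; the model and data objects
# below are placeholders, not part of this module:
#
#     trials = TrialReports(seed=0)
#     for report in trials.conduct(n_trials=5):
#         model = build_model()                    # user-defined
#         report.history = model.fit(x_tr, y_tr)   # user-provided data
#         report.yhat_te = model.predict(x_te)
#         report.actual = y_te
#         report.score = mean_squared_error(report.actual, report.yhat_te)
#         report.model = model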
class HyperparameterSearchReport:
"""Manages a list of TrialReports. This class loads tables from a sqlite
database with a certain scheme and performs some analysis. More
specifically, the list of loaded trial-reports pertain to those generated
during a certain hyperparameter search with hp_tune_xx.py."""
bayes_col_filter = ['n_iter', 'model_uids', 'mean_score', 'best_score',
'start_time', 'end_time']
def __init__(self):
self.hp_searches = {}
print('Reading', cfg.data_cfg['db_path'], '..')
with sqlite3.connect(cfg.data_cfg['db_path']) as con:
query = """SELECT * FROM meta_experiments"""
self.meta_tab = pd.read_sql_query(query, con)
query = """SELECT * FROM bayes_opt_results"""
self.bayes_tab = pd.read_sql_query(query, con)
assert not self.meta_tab.id.duplicated().any(), \
'Duplicated ID found! -> {}. 6 digits too few?'.format(
self.meta_tab.id[self.meta_tab.id.duplicated()])
def read_search(self, hp_search_uid, verbose=True):
tab = (self.bayes_tab
.loc[self.bayes_tab.bayes_search_id == hp_search_uid,
self.bayes_col_filter]
.sort_values(by='n_iter', axis=0)
.reset_index(drop=True))
# get runtime
time_format = "%Y-%m-%d %H:%M"
runtime = (pd.to_datetime(tab.end_time, format=time_format) -
pd.to_datetime(tab.start_time, format=time_format)).sum()
if verbose:
print(f'Runtime of experiment {hp_search_uid}: {runtime}')
# get model ids and their score std
model_uids = (tab['model_uids'].astype('object')
.apply(eval).apply(pd.Series).stack()
.reset_index(drop=True, level=-1)
.rename("id").rename_axis('n_iter').reset_index())
# merge corresponding single scores from meta_table
model_uids = pd.merge(model_uids, self.meta_tab, how='left', on='id')
# get id of best model
best_model_id = model_uids.at[model_uids.score.idxmin(), 'id']
# get std per iter
grp = model_uids[['n_iter', 'score', 'id']].groupby('n_iter')
grp_std = grp['score'].std().reset_index()\
.rename(columns={'score': 'std_score'})
grp_best_model_id = grp['id'].min().reset_index()\
.rename(columns={'id': 'best_model_id'})
tab = | pd.merge(tab, grp_std, how='left', on='n_iter') | pandas.merge |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from constants import *
import numpy as np
import pandas as pd
import utils
import time
from collections import deque, defaultdict
from scipy.spatial.distance import cosine
from scipy import stats
import math
seed = SEED
cur_stage = CUR_STAGE
mode = cur_mode
#used_recall_source = 'i2i_w02-b2b-i2i2i'
#used_recall_source = 'i2i_w02-b2b-i2i2i-i2i_w10'
#used_recall_source = 'i2i_w02-b2b-i2i2i-i2i_w10-i2i2b'
used_recall_source = cur_used_recall_source
sum_mode = 'nosum'
used_recall_source = used_recall_source+'-'+sum_mode
print( f'Recall source used: {used_recall_source}')
def feat_item_sum_mean_sim_weight_loc_weight_time_weight_rank_weight(data):
df = data.copy()
df = df[ ['user','item','sim_weight','loc_weight','time_weight','rank_weight','index'] ]
feat = df[ ['index','user','item'] ]
df = df.groupby( ['user','item'] )[ ['sim_weight','loc_weight','time_weight','rank_weight'] ].agg( ['sum','mean'] ).reset_index()
cols = [ f'item_{j}_{i}' for i in ['sim_weight','loc_weight','time_weight','rank_weight'] for j in ['sum','mean'] ]
df.columns = [ 'user','item' ]+ cols
feat = pd.merge( feat, df, on=['user','item'], how='left')
feat = feat[ cols ]
return feat
def feat_sum_sim_loc_time_weight(data):
df = data.copy()
df = df[ ['index','sim_weight','loc_weight','time_weight'] ]
feat = df[ ['index'] ]
feat['sum_sim_loc_time_weight'] = df['sim_weight'] + df['loc_weight'] + df['time_weight']
feat = feat[ ['sum_sim_loc_time_weight'] ]
return feat
def feat_road_item_text_cossim(data):
df = data.copy()
df = df[ ['index','road_item','item'] ]
feat = df[ ['index'] ]
item_text = {}
item_feat = utils.load_pickle(item_feat_pkl)
for k,v in item_feat.items():
item_text[k] = v[0]
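    # item_feat values hold the text embedding at v[0] and the image embedding
    # at v[1] (cf. the *_image_* feature functions below).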
def func(ss):
item1 = ss['road_item']
item2 = ss['item']
if ( item1 in item_text ) and ( item2 in item_text ):
item1_text = item_text[item1]
item2_text = item_text[item2]
c = np.dot( item1_text, item2_text )
a = np.linalg.norm( item1_text )
b = np.linalg.norm( item2_text )
return c/(a*b+(1e-9))
else:
return np.nan
feat['road_item_text_cossim'] = df[ ['road_item','item'] ].apply(func, axis=1)
feat = feat[ ['road_item_text_cossim'] ]
return feat
def feat_road_item_text_eulasim(data):
df = data.copy()
df = df[ ['index','road_item','item'] ]
feat = df[ ['index'] ]
item_text = {}
item_feat = utils.load_pickle(item_feat_pkl)
for k,v in item_feat.items():
item_text[k] = v[0]
def func(ss):
item1 = ss['road_item']
item2 = ss['item']
if ( item1 in item_text ) and ( item2 in item_text ):
item1_text = item_text[item1]
item2_text = item_text[item2]
a = np.linalg.norm( item1_text - item2_text )
return a
else:
return np.nan
feat['road_item_text_eulasim'] = df[ ['road_item','item'] ].apply(func, axis=1)
feat = feat[ ['road_item_text_eulasim'] ]
return feat
def feat_road_item_text_mansim(data):
df = data.copy()
df = df[ ['index','road_item','item'] ]
feat = df[ ['index'] ]
item_text = {}
item_feat = utils.load_pickle(item_feat_pkl)
for k,v in item_feat.items():
item_text[k] = v[0]
def func(ss):
item1 = ss['road_item']
item2 = ss['item']
if ( item1 in item_text ) and ( item2 in item_text ):
item1_text = item_text[item1]
item2_text = item_text[item2]
a = np.linalg.norm( item1_text - item2_text, ord=1 )
return a
else:
return np.nan
feat['road_item_text_mansim'] = df[ ['road_item','item'] ].apply(func, axis=1)
feat = feat[ ['road_item_text_mansim'] ]
return feat
def feat_road_item_image_cossim(data):
df = data.copy()
df = df[ ['index','road_item','item'] ]
feat = df[ ['index'] ]
item_image = {}
item_feat = utils.load_pickle(item_feat_pkl)
for k,v in item_feat.items():
item_image[k] = v[1]
def func(ss):
item1 = ss['road_item']
item2 = ss['item']
if ( item1 in item_image ) and ( item2 in item_image ):
item1_image = item_image[item1]
item2_image = item_image[item2]
c = np.dot( item1_image, item2_image )
a = np.linalg.norm( item1_image )
b = np.linalg.norm( item2_image )
return c/(a*b+(1e-9))
else:
return np.nan
feat['road_item_image_cossim'] = df[ ['road_item','item'] ].apply(func, axis=1)
feat = feat[ ['road_item_image_cossim'] ]
return feat
def feat_road_item_image_eulasim(data):
df = data.copy()
df = df[ ['index','road_item','item'] ]
feat = df[ ['index'] ]
item_image = {}
item_feat = utils.load_pickle(item_feat_pkl)
for k,v in item_feat.items():
item_image[k] = v[1]
def func(ss):
item1 = ss['road_item']
item2 = ss['item']
if ( item1 in item_image ) and ( item2 in item_image ):
item1_image = item_image[item1]
item2_image = item_image[item2]
a = np.linalg.norm( item1_image - item2_image )
return a
else:
return np.nan
feat['road_item_image_eulasim'] = df[ ['road_item','item'] ].apply(func, axis=1)
feat = feat[ ['road_item_image_eulasim'] ]
return feat
def feat_road_item_image_mansim(data):
df = data.copy()
df = df[ ['index','road_item','item'] ]
feat = df[ ['index'] ]
item_image = {}
item_feat = utils.load_pickle(item_feat_pkl)
for k,v in item_feat.items():
        item_image[k] = v[1]
def func(ss):
item1 = ss['road_item']
item2 = ss['item']
if ( item1 in item_image ) and ( item2 in item_image ):
item1_image = item_image[item1]
item2_image = item_image[item2]
a = np.linalg.norm( item1_image - item2_image, ord=1 )
return a
else:
return np.nan
feat['road_item_image_mansim'] = df[ ['road_item','item'] ].apply(func, axis=1)
feat = feat[ ['road_item_image_mansim'] ]
return feat
def feat_i2i_seq(data):
df = data.copy()
feat = df[ ['index','road_item','item'] ]
vals = feat[ ['road_item', 'item'] ].values
new_keys = set()
for val in vals:
new_keys.add( (val[0], val[1]) )
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
i2i_sim_seq = {}
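    # For every candidate (road_item, item) pair, collect the raw co-occurrence
    # records (loc1, loc2, t1, t2, click-sequence length) over all user click
    # sequences; the actual weighting happens later in feat_i2i_sim.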
st0 = time.time()
tot = 0
for user, items in user_item_dict.items():
times = user_time_dict[user]
if tot % 500 == 0:
print( f'tot: {len(user_item_dict)}, now: {tot}' )
tot += 1
for loc1, item in enumerate(items):
for loc2, relate_item in enumerate(items):
if item == relate_item:
continue
if (item,relate_item) not in new_keys:
continue
t1 = times[loc1]
t2 = times[loc2]
i2i_sim_seq.setdefault((item,relate_item), [])
i2i_sim_seq[ (item,relate_item) ].append( (loc1, loc2, t1, t2, len(items) ) )
st1 = time.time()
print(st1-st0)
return i2i_sim_seq
def feat_i2i2i_seq(data):
df = data.copy()
feat = df[ ['index','road_item','item'] ]
vals = feat[ ['road_item', 'item'] ].values
new_keys = set()
for val in vals:
new_keys.add( (val[0], val[1]) )
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
all_pair_num = 0
sim_item_p2 = {}
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
times = user_time_dict[user]
for loc1, item in enumerate(items):
item_cnt[item] += 1
sim_item_p2.setdefault(item, {})
for loc2, relate_item in enumerate(items):
if item == relate_item:
continue
all_pair_num += 1
t1 = times[loc1]
t2 = times[loc2]
sim_item_p2[item].setdefault(relate_item, 0)
if loc1-loc2>0:
time_weight = (1 - (t1 - t2) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc1-loc2-1
loc_weight = (0.9**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
sim_item_p2[item][relate_item] += 1 * 1.0 * loc_weight * time_weight / math.log(1 + len(items))
else:
time_weight = (1 - (t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc2-loc1-1
loc_weight = (0.9**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
sim_item_p2[item][relate_item] += 1 * 1.0 * loc_weight * time_weight / math.log(1 + len(items))
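    # Two normalizations of the accumulated co-occurrence weights: sim_item_p1
    # divides by cnt_i * cnt_j, sim_item_p2 by (cnt_i * cnt_j) ** 0.2; the
    # latter is then truncated to the top-50 neighbours per item.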
sim_item_p1 = {}
for i, related_items in sim_item_p2.items():
sim_item_p1[i] = {}
for j, cij in related_items.items():
sim_item_p1[i][j] = cij / (item_cnt[i] * item_cnt[j])
sim_item_p2[i][j] = cij / ((item_cnt[i] * item_cnt[j]) ** 0.2)
print('all_pair_num',all_pair_num)
for key in sim_item_p2.keys():
t = sim_item_p2[key]
t = sorted(t.items(), key=lambda d:d[1], reverse = True )
res = {}
for i in t[0:50]:
res[i[0]]=i[1]
sim_item_p2[key] = res
i2i2i_sim_seq = {}
t1 = time.time()
for idx,item1 in enumerate( sim_item_p2.keys() ):
if idx%10000==0:
t2 = time.time()
print( f'use time {t2-t1} for 10000, now {idx} , tot {len(sim_item_p2.keys())}' )
t1 = t2
for item2 in sim_item_p2[item1].keys():
if item2 == item1:
continue
for item3 in sim_item_p2[item2].keys():
if item3 == item1 or item3 == item2:
continue
if (item1,item3) not in new_keys:
continue
i2i2i_sim_seq.setdefault((item1,item3), [])
i2i2i_sim_seq[ (item1,item3) ].append( ( item2, sim_item_p2[item1][item2], sim_item_p2[item2][item3],
sim_item_p1[item1][item2], sim_item_p1[item2][item3] ) )
return i2i2i_sim_seq
def feat_i2i2b_seq(data):
df = data.copy()
feat = df[ ['index','road_item','item'] ]
vals = feat[ ['road_item', 'item'] ].values
new_keys = set()
for val in vals:
new_keys.add( (val[0], val[1]) )
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
all_pair_num = 0
sim_item_p2 = {}
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
times = user_time_dict[user]
for loc1, item in enumerate(items):
item_cnt[item] += 1
sim_item_p2.setdefault(item, {})
for loc2, relate_item in enumerate(items):
if item == relate_item:
continue
all_pair_num += 1
t1 = times[loc1]
t2 = times[loc2]
sim_item_p2[item].setdefault(relate_item, 0)
if loc1-loc2>0:
time_weight = (1 - (t1 - t2) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc1-loc2-1
loc_weight = (0.9**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
sim_item_p2[item][relate_item] += 1 * 1.0 * loc_weight * time_weight / math.log(1 + len(items))
else:
time_weight = (1 - (t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc2-loc1-1
loc_weight = (0.9**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
sim_item_p2[item][relate_item] += 1 * 1.0 * loc_weight * time_weight / math.log(1 + len(items))
sim_item_p1 = {}
for i, related_items in sim_item_p2.items():
sim_item_p1[i] = {}
for j, cij in related_items.items():
sim_item_p1[i][j] = cij / (item_cnt[i] * item_cnt[j])
sim_item_p2[i][j] = cij / ((item_cnt[i] * item_cnt[j]) ** 0.2)
print('all_pair_num',all_pair_num)
for key in sim_item_p2.keys():
t = sim_item_p2[key]
t = sorted(t.items(), key=lambda d:d[1], reverse = True )
res = {}
for i in t[0:100]:
res[i[0]]=i[1]
sim_item_p2[key] = res
blend_sim = utils.load_sim(item_blend_sim_path)
blend_score = {}
for item in blend_sim:
i = item[0]
blend_score.setdefault(i,{})
for j,cij in item[1][:100]:
blend_score[i][j] = cij
i2i2b_sim_seq = {}
t1 = time.time()
for idx,item1 in enumerate( sim_item_p2.keys() ):
if idx%10000==0:
t2 = time.time()
print( f'use time {t2-t1} for 10000, now {idx} , tot {len(sim_item_p2.keys())}' )
t1 = t2
for item2 in sim_item_p2[item1].keys():
if (item2 == item1) or (item2 not in blend_score.keys()):
continue
for item3 in blend_score[item2].keys():
if item3 == item1 or item3 == item2:
continue
if (item1,item3) not in new_keys:
continue
i2i2b_sim_seq.setdefault((item1,item3), [])
i2i2b_sim_seq[ (item1,item3) ].append( ( item2, sim_item_p2[item1][item2], blend_score[item2][item3],
sim_item_p1[item1][item2], blend_score[item2][item3] ) )
return i2i2b_sim_seq
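# The feat_i2i_* functions below turn the precomputed per-pair records in the feat_i2i_seq
# pickle into scalar features. Each record is (loc1, loc2, t1, t2, record_len) for one user
# session in which road_item and item co-occurred.
# feat_i2i_sim reproduces the recall-time i2i score: the sum of
# loc_weight * time_weight / log(1 + record_len), normalised by (item_cnt_a * item_cnt_b) ** 0.2.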
def feat_i2i_sim(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
for loc1, item in enumerate(items):
item_cnt[item] += 1
df = data.copy()
feat = df[ ['index','road_item','item'] ]
print('Loading i2i_sim_seq')
i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i_seq_{mode}_{cur_stage}.pkl') )
print('Finished i2i_sim_seq')
print('Create new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
result = {}
for key in new_keys:
if key not in i2i_sim_seq.keys():
result[key] = np.nan
continue
result[key] = 0.0
records = i2i_sim_seq[key]
if len(records)==0:
print(key)
for record in records:
loc1, loc2, t1, t2, record_len = record
if loc1-loc2>0:
time_weight = (1 - (t1 - t2) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc1-loc2-1
loc_weight = (0.9**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
else:
time_weight = (1 - (t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc2-loc1-1
loc_weight = (0.9**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
result[key] += 1 * 1.0 * loc_weight * time_weight / math.log(1 + record_len)
for key in new_keys:
if np.isnan( result[key] ):
continue
result[key] = result[key] / ((item_cnt[key[0]] * item_cnt[key[1]]) ** 0.2)
print('Finished getting result')
feat['i2i_sim'] = feat['new_keys'].map(result)
#import pdb
#pdb.set_trace()
#i2i_seq_feat = pd.concat( [feat,i2i_seq_feat], axis=1 )
#i2i_seq_feat['itemAB'] = i2i_seq_feat['road_item'].astype('str') + '-' + i2i_seq_feat['item'].astype('str')
feat = feat[ ['i2i_sim'] ]
return feat
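# Ablation variants: the next four functions decompose the i2i score, summing only the
# location weight (for several loc_base values) or only the time weight, each in an
# absolute (direction-ignoring) and a signed (history vs. future) version.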
def feat_i2i_sim_abs_loc_weights_loc_base(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
for loc1, item in enumerate(items):
item_cnt[item] += 1
df = data.copy()
feat = df[ ['index','road_item','item'] ]
print('Loading i2i_sim_seq')
i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i_seq_{mode}_{cur_stage}.pkl') )
print('Finished i2i_sim_seq')
print('Create new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
loc_bases = [0.2,0.4,0.6,0.8,1.0]
for loc_base in loc_bases:
print(f'Starting {loc_base}')
result = {}
for key in new_keys:
if key not in i2i_sim_seq.keys():
result[key] = np.nan
continue
result[key] = 0.0
records = i2i_sim_seq[key]
if len(records)==0:
print(key)
for record in records:
loc1, loc2, t1, t2, record_len = record
if loc1-loc2>0:
loc_diff = loc1-loc2-1
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
else:
loc_diff = loc2-loc1-1
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
result[key] += loc_weight
feat['i2i_sim_abs_loc_weights_loc_base'+str(loc_base)] = feat['new_keys'].map(result)
print('Finished getting result')
cols = []
for loc_base in loc_bases:
cols.append( 'i2i_sim_abs_loc_weights_loc_base'+str(loc_base) )
feat = feat[ cols ]
return feat
def feat_i2i_sim_loc_weights_loc_base(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
for loc1, item in enumerate(items):
item_cnt[item] += 1
df = data.copy()
feat = df[ ['index','road_item','item'] ]
print('Loading i2i_sim_seq')
i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i_seq_{mode}_{cur_stage}.pkl') )
print('Finished i2i_sim_seq')
print('Create new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
loc_bases = [0.2,0.4,0.6,0.8,1.0]
for loc_base in loc_bases:
print(f'Starting {loc_base}')
result = {}
for key in new_keys:
if key not in i2i_sim_seq.keys():
result[key] = np.nan
continue
result[key] = 0.0
records = i2i_sim_seq[key]
if len(records)==0:
print(key)
for record in records:
loc1, loc2, t1, t2, record_len = record
loc_diff = loc1-loc2
loc_weight = (loc_base**loc_diff)
if abs(loc_weight) <= 0.2:
if loc_weight > 0:
loc_weight = 0.2
else:
loc_weight = -0.2
result[key] += loc_weight
feat['i2i_sim_loc_weights_loc_base'+str(loc_base)] = feat['new_keys'].map(result)
print('Finished getting result')
cols = []
for loc_base in loc_bases:
cols.append( 'i2i_sim_loc_weights_loc_base'+str(loc_base) )
feat = feat[ cols ]
return feat
def feat_i2i_sim_abs_time_weights(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
for loc1, item in enumerate(items):
item_cnt[item] += 1
df = data.copy()
feat = df[ ['index','road_item','item'] ]
print('Loading i2i_sim_seq')
i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i_seq_{mode}_{cur_stage}.pkl') )
print('Finished i2i_sim_seq')
print('Create new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
result = {}
for key in new_keys:
if key not in i2i_sim_seq.keys():
result[key] = np.nan
continue
result[key] = 0.0
records = i2i_sim_seq[key]
if len(records)==0:
print(key)
for record in records:
loc1, loc2, t1, t2, record_len = record
if loc1-loc2>0:
time_weight = (1 - (t1 - t2) * 100)
if time_weight<=0.2:
time_weight = 0.2
else:
time_weight = (1 - (t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
result[key] += time_weight
feat['i2i_sim_abs_time_weights'] = feat['new_keys'].map(result)
print('Finished getting result')
cols = [ 'i2i_sim_abs_time_weights' ]
feat = feat[ cols ]
return feat
def feat_i2i_sim_time_weights(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
for loc1, item in enumerate(items):
item_cnt[item] += 1
df = data.copy()
feat = df[ ['index','road_item','item'] ]
print('Loading i2i_sim_seq')
i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i_seq_{mode}_{cur_stage}.pkl') )
print('Finished i2i_sim_seq')
print('Create new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
result = {}
for key in new_keys:
if key not in i2i_sim_seq.keys():
result[key] = np.nan
continue
result[key] = 0.0
records = i2i_sim_seq[key]
if len(records)==0:
print(key)
for record in records:
loc1, loc2, t1, t2, record_len = record
time_weight = (1 - (t1 - t2) * 100)
if abs(time_weight)<=0.2:
if time_weight > 0:
time_weight = 0.2
else:
time_weight = -0.2
result[key] += time_weight
feat['i2i_sim_time_weights'] = feat['new_keys'].map(result)
print('Finished getting result')
cols = [ 'i2i_sim_time_weights' ]
feat = feat[ cols ]
return feat
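# feat_i2i_cijs_*: same per-pair records, but keeping the full contribution
# loc_weight * time_weight / log(1 + record_len), aggregated as an absolute sum, a signed
# sum (added when loc1 > loc2, subtracted otherwise) and a per-record mean, for several loc_base values.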
def feat_i2i_cijs_abs_loc_weights_loc_base(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
for loc1, item in enumerate(items):
item_cnt[item] += 1
df = data.copy()
feat = df[ ['index','road_item','item'] ]
print('Loading i2i_sim_seq')
i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i_seq_{mode}_{cur_stage}.pkl') )
print('Finished i2i_sim_seq')
print('Create new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
loc_bases = [0.2,0.4,0.6,0.8,1.0]
for loc_base in loc_bases:
print(f'Starting {loc_base}')
result = {}
for key in new_keys:
if key not in i2i_sim_seq.keys():
result[key] = np.nan
continue
result[key] = 0.0
records = i2i_sim_seq[key]
if len(records)==0:
print(key)
for record in records:
loc1, loc2, t1, t2, record_len = record
if loc1-loc2>0:
time_weight = (1 - (t1 - t2) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc1-loc2-1
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
else:
time_weight = (1 - (t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc2-loc1-1
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
result[key] += 1 * 1.0 * loc_weight * time_weight / math.log(1 + record_len)
feat['i2i_cijs_abs_loc_weights_loc_base_'+str(loc_base)] = feat['new_keys'].map(result)
print('Finished getting result')
cols = []
for loc_base in loc_bases:
cols.append( 'i2i_cijs_abs_loc_weights_loc_base_'+str(loc_base) )
feat = feat[ cols ]
return feat
def feat_i2i_cijs_loc_weights_loc_base(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
for loc1, item in enumerate(items):
item_cnt[item] += 1
df = data.copy()
feat = df[ ['index','road_item','item'] ]
print('Loading i2i_sim_seq')
i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i_seq_{mode}_{cur_stage}.pkl') )
print('Finished i2i_sim_seq')
print('Create new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
loc_bases = [0.2,0.4,0.6,0.8,1.0]
for loc_base in loc_bases:
print(f'Starting {loc_base}')
result = {}
for key in new_keys:
if key not in i2i_sim_seq.keys():
result[key] = np.nan
continue
result[key] = 0.0
records = i2i_sim_seq[key]
if len(records)==0:
print(key)
for record in records:
loc1, loc2, t1, t2, record_len = record
time_weight = (1 - abs(t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = abs(loc2-loc1)
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
if loc1-loc2>0:
result[key] += 1 * 1.0 * loc_weight * time_weight / math.log(1 + record_len)
else:
result[key] -= 1 * 1.0 * loc_weight * time_weight / math.log(1 + record_len)
feat['i2i_cijs_loc_weights_loc_base_'+str(loc_base)] = feat['new_keys'].map(result)
print('Finished getting result')
cols = []
for loc_base in loc_bases:
cols.append( 'i2i_cijs_loc_weights_loc_base_'+str(loc_base) )
feat = feat[ cols ]
return feat
def feat_i2i_cijs_mean_abs_loc_weights_loc_base(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
for loc1, item in enumerate(items):
item_cnt[item] += 1
df = data.copy()
feat = df[ ['index','road_item','item'] ]
print('Loading i2i_sim_seq')
i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i_seq_{mode}_{cur_stage}.pkl') )
print('Finished i2i_sim_seq')
print('Create new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
loc_bases = [0.2,0.4,0.6,0.8,1.0]
for loc_base in loc_bases:
print(f'Starting {loc_base}')
result = {}
for key in new_keys:
if key not in i2i_sim_seq.keys():
result[key] = np.nan
continue
result[key] = 0.0
records = i2i_sim_seq[key]
if len(records)==0:
print(key)
for record in records:
loc1, loc2, t1, t2, record_len = record
if loc1-loc2>0:
time_weight = (1 - (t1 - t2) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc1-loc2-1
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
else:
time_weight = (1 - (t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc2-loc1-1
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
result[key] += ( 1 * 1.0 * loc_weight * time_weight / math.log(1 + record_len) ) / len(records)
feat['i2i_cijs_mean_abs_loc_weights_loc_base_'+str(loc_base)] = feat['new_keys'].map(result)
print('Finished getting result')
cols = []
for loc_base in loc_bases:
cols.append( 'i2i_cijs_mean_abs_loc_weights_loc_base_'+str(loc_base) )
feat = feat[ cols ]
return feat
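# Popularity terms: (item_cnt_a + item_cnt_b) ** w and (item_cnt_a * item_cnt_b) ** w for
# w in {0.2, 0.4, 0.6, 0.8, 1.0}, exposing the normalisation denominators of the i2i score
# as standalone features.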
def feat_i2i_bottom_itemcnt_sum_weight(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
for loc1, item in enumerate(items):
item_cnt[item] += 1
df = data.copy()
feat = df[ ['index','road_item','item'] ]
#print('Loading i2i_sim_seq')
#i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i_seq_{mode}_{cur_stage}.pkl') )
#print('Finished i2i_sim_seq')
print('Create new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
weights = [0.2,0.4,0.6,0.8,1.0]
for weight in weights:
print(f'Starting {weight}')
result = {}
for key in new_keys:
if (key[0] in item_cnt.keys()) and (key[1] in item_cnt.keys()):
result[key] = ((item_cnt[key[0]] + item_cnt[key[1]]) ** weight)
feat['i2i_bottom_itemcnt_sum_weight_'+str(weight)] = feat['new_keys'].map(result)
print('Finished getting result')
cols = []
for weight in weights:
cols.append( 'i2i_bottom_itemcnt_sum_weight_'+str(weight) )
feat = feat[ cols ]
return feat
def feat_i2i_bottom_itemcnt_multi_weight(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
for loc1, item in enumerate(items):
item_cnt[item] += 1
df = data.copy()
feat = df[ ['index','road_item','item'] ]
#print('Loading i2i_sim_seq')
#i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i_seq_{mode}_{cur_stage}.pkl') )
#print('Finished i2i_sim_seq')
print('Create new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
weights = [0.2,0.4,0.6,0.8,1.0]
for weight in weights:
print(f'Starting {weight}')
result = {}
for key in new_keys:
if (key[0] in item_cnt.keys()) and (key[1] in item_cnt.keys()):
result[key] = ((item_cnt[key[0]] * item_cnt[key[1]]) ** weight)
feat['i2i_bottom_itemcnt_multi_weight_'+str(weight)] = feat['new_keys'].map(result)
print('Finished getting result')
cols = []
for weight in weights:
cols.append( 'i2i_bottom_itemcnt_multi_weight_'+str(weight) )
feat = feat[ cols ]
return feat
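# feat_b2b_sim: direct lookup of the pretrained blend similarity between road_item and item
# (np.nan when the pair is not among the top-100 blend neighbours).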
def feat_b2b_sim(data):
df = data.copy()
feat = df[ ['index','road_item','item'] ]
blend_sim = utils.load_sim(item_blend_sim_path)
b2b_sim = {}
for item in blend_sim:
i = item[0]
b2b_sim.setdefault(i,{})
for j,cij in item[1][:100]:
b2b_sim[i][j] = cij
vals = feat[ ['road_item','item'] ].values
result = []
for val in vals:
item1 = val[0]
item2 = val[1]
if item1 in b2b_sim.keys():
if item2 in b2b_sim[item1].keys():
result.append( b2b_sim[ item1 ][ item2 ] )
else:
result.append( np.nan )
else:
result.append( np.nan )
feat['b2b_sim'] = result
feat = feat[ ['b2b_sim'] ]
return feat
def feat_itemqa_loc_diff(data):
df = data.copy()
feat = df[ ['index','query_item_loc','road_item_loc'] ]
feat['itemqa_loc_diff'] = feat['road_item_loc'] - feat['query_item_loc']
feat['abs_itemqa_loc_diff'] = feat['itemqa_loc_diff'].abs()
feat = feat[ ['itemqa_loc_diff','abs_itemqa_loc_diff'] ]
return feat
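# feat_sim_three_weight: recomputes, from the training sessions, the summed location weights,
# time weights, record (session) lengths and co-occurrence counts for each (road_item, item)
# pair, plus their per-co-occurrence means.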
def feat_sim_three_weight(data):
df = data.copy()
feat = df[ ['index','road_item','item'] ]
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
loc_weights = {}
time_weights = {}
record_weights = {}
com_item_cnt = {}
item_set = set()
item_dict_set = {}
st0 = time.time()
for user, items in user_item_dict.items():
for item in items:
item_set.add(item)
item_dict_set[item] = set()
for user, items in user_item_dict.items():
times = user_time_dict[user]
for loc1, item in enumerate(items):
loc_weights.setdefault(item, {})
time_weights.setdefault(item, {})
record_weights.setdefault(item, {})
com_item_cnt.setdefault(item, {})
for loc2, relate_item in enumerate(items):
if item == relate_item:
continue
item_dict_set[ item ].add( relate_item )
t1 = times[loc1]
t2 = times[loc2]
loc_weights[item].setdefault(relate_item, 0)
time_weights[item].setdefault(relate_item, 0)
record_weights[item].setdefault(relate_item, 0)
com_item_cnt[item].setdefault(relate_item, 0)
if loc1-loc2>0:
time_weight = (1 - (t1 - t2) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc1-loc2-1
loc_weight = (0.9**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
else:
time_weight = (1 - (t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc2-loc1-1
loc_weight = (0.9**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
loc_weights[item][relate_item] += loc_weight
time_weights[item][relate_item] += time_weight
record_weights[item][relate_item] += len(items)
com_item_cnt[item][relate_item] += 1
st1 = time.time()
print(st1-st0)
print('start')
num = feat.shape[0]
road_item = feat['road_item'].values
t_item = feat['item'].values
com_item_loc_weights_sum = np.zeros( num, dtype=float )
com_item_time_weights_sum = np.zeros( num, dtype=float )
com_item_record_weights_sum = np.zeros( num, dtype=float )
t_com_item_cnt = np.zeros( num, dtype=float )
for i in range(num):
if road_item[i] in item_set:
if t_item[i] in item_dict_set[ road_item[i] ]:
com_item_loc_weights_sum[i] = loc_weights[ road_item[i] ][ t_item[i] ]
com_item_time_weights_sum[i] = time_weights[ road_item[i] ][ t_item[i] ]
com_item_record_weights_sum[i] = record_weights[ road_item[i] ][ t_item[i] ]
t_com_item_cnt[i] = com_item_cnt[ road_item[i] ][ t_item[i] ]
else:
com_item_loc_weights_sum[i] = np.nan
com_item_time_weights_sum[i] = np.nan
com_item_record_weights_sum[i] = np.nan
t_com_item_cnt[i] = np.nan
else:
com_item_loc_weights_sum[i] = np.nan
com_item_time_weights_sum[i] = np.nan
com_item_record_weights_sum[i] = np.nan
t_com_item_cnt[i] = np.nan
feat['com_item_loc_weights_sum'] = com_item_loc_weights_sum
feat['com_item_time_weights_sum'] = com_item_time_weights_sum
feat['com_item_record_weights_sum'] = com_item_record_weights_sum
feat['com_item_cnt'] = t_com_item_cnt
feat['com_item_loc_weights_mean'] = feat['com_item_loc_weights_sum'] / feat['com_item_cnt']
feat['com_item_time_weights_mean'] = feat['com_item_time_weights_sum'] / feat['com_item_cnt']
feat['com_item_record_weights_mean'] = feat['com_item_record_weights_sum'] / feat['com_item_cnt']
feat = feat[ ['com_item_loc_weights_sum','com_item_time_weights_sum','com_item_record_weights_sum',
'com_item_loc_weights_mean','com_item_time_weights_mean','com_item_record_weights_mean' ] ]
st2 = time.time()
print(st2-st1)
return feat
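# feat_different_type_road_score_sum_mean(_new): copy sim_weight into one column per recall
# source (selected by recall_type), then aggregate the sum and mean of each score per
# (user, item) pair; the _new variant covers the five recall sources.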
def feat_different_type_road_score_sum_mean(data):
df = data.copy()
feat = df[ ['user','item','index','sim_weight','recall_type'] ]
feat['i2i_score'] = feat['sim_weight']
feat['blend_score'] = feat['sim_weight']
feat['i2i2i_score'] = feat['sim_weight']
feat.loc[ feat['recall_type']!=0 , 'i2i_score'] = np.nan
feat.loc[ feat['recall_type']!=1 , 'blend_score'] = np.nan
feat.loc[ feat['recall_type']!=2 , 'i2i2i_score'] = np.nan
feat['user_item'] = feat['user'].astype('str') + '-' + feat['item'].astype('str')
for col in ['i2i_score','blend_score','i2i2i_score']:
df = feat[ ['user_item',col,'index'] ]
df = df.groupby('user_item')[col].sum().reset_index()
df[col+'_sum'] = df[col]
df = df[ ['user_item',col+'_sum'] ]
feat = pd.merge( feat, df, on='user_item', how='left')
df = feat[ ['user_item',col,'index'] ]
df = df.groupby('user_item')[col].mean().reset_index()
df[col+'_mean'] = df[col]
df = df[ ['user_item',col+'_mean'] ]
feat = pd.merge( feat, df, on='user_item', how='left')
feat = feat[ ['i2i_score','i2i_score_sum','i2i_score_mean',
'blend_score','blend_score_sum','blend_score_mean',
'i2i2i_score','i2i2i_score_sum','i2i2i_score_mean',] ]
return feat
def feat_different_type_road_score_sum_mean_new(data):
df = data.copy()
feat = df[ ['user','item','index','sim_weight','recall_type'] ]
recall_source_names = ['i2i_w02','b2b','i2i2i','i2i_w10','i2i2b']
recall_source_names = [ i+'_score' for i in recall_source_names ]
for idx,col in enumerate(recall_source_names):
feat[col] = feat['sim_weight']
feat.loc[ feat['recall_type']!=idx, col ] = np.nan
for col in recall_source_names:
df = feat[ ['user','item',col,'index'] ]
df = df.groupby( ['user','item'] )[col].sum().reset_index()
df[col+'_sum'] = df[col]
df = df[ ['user','item',col+'_sum'] ]
feat = pd.merge( feat, df, on=['user','item'], how='left')
df = feat[ ['user','item',col,'index'] ]
df = df.groupby( ['user','item'] )[col].mean().reset_index()
df[col+'_mean'] = df[col]
df = df[ ['user','item',col+'_mean'] ]
feat = pd.merge( feat, df, on=['user','item'], how='left')
feat_list = recall_source_names + [ col+'_sum' for col in recall_source_names ] + [ col+'_mean' for col in recall_source_names ]
feat = feat[ feat_list ]
return feat
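# feat_sim_base: raw popularity and co-occurrence statistics: click counts of road_item and
# item, the unnormalised cij between them, and the number of sessions in which they co-occur.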
def feat_sim_base(data):
df = data.copy()
feat = df[ ['index','road_item','item'] ]
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
sim_item = {}
item_cnt = defaultdict(int)
com_item_cnt = {}
item_set = set()
item_dict_set = {}
st0 = time.time()
for user, items in user_item_dict.items():
for item in items:
item_set.add(item)
item_dict_set[item] = set()
for user, items in user_item_dict.items():
times = user_time_dict[user]
for loc1, item in enumerate(items):
item_cnt[item] += 1
sim_item.setdefault(item, {})
com_item_cnt.setdefault(item, {})
for loc2, relate_item in enumerate(items):
if item == relate_item:
continue
item_dict_set[ item ].add( relate_item )
t1 = times[loc1]
t2 = times[loc2]
sim_item[item].setdefault(relate_item, 0)
com_item_cnt[item].setdefault(relate_item, 0)
if loc1-loc2>0:
time_weight = (1 - (t1 - t2) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc1-loc2-1
loc_weight = (0.9**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
sim_item[item][relate_item] += 1 * 1.0 * loc_weight * time_weight / math.log(1 + len(items))
else:
time_weight = (1 - (t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc2-loc1-1
loc_weight = (0.9**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
sim_item[item][relate_item] += 1 * 1.0 * loc_weight * time_weight / math.log(1 + len(items))
com_item_cnt[item][relate_item] += 1.0
st1 = time.time()
print(st1-st0)
print('start')
num = feat.shape[0]
road_item = feat['road_item'].values
t_item = feat['item'].values
road_item_cnt = np.zeros( num, dtype=float )
t_item_cnt = np.zeros( num, dtype=float )
com_item_cij = np.zeros( num, dtype=float )
t_com_item_cnt = np.zeros( num, dtype=float )
for i in range(num):
if road_item[i] in item_set:
road_item_cnt[i] = item_cnt[ road_item[i] ]
if t_item[i] in item_dict_set[ road_item[i] ]:
com_item_cij[i] = sim_item[ road_item[i] ][ t_item[i] ]
t_com_item_cnt[i] = com_item_cnt[ road_item[i] ][ t_item[i] ]
else:
com_item_cij[i] = np.nan
t_com_item_cnt[i] = np.nan
else:
road_item_cnt[i] = np.nan
com_item_cij[i] = np.nan
t_com_item_cnt[i] = np.nan
if t_item[i] in item_set:
t_item_cnt[i] = item_cnt[ t_item[i] ]
else:
t_item_cnt[i] = np.nan
feat['road_item_cnt'] = road_item_cnt
feat['item_cnt'] = t_item_cnt
feat['com_item_cij'] = com_item_cij
feat['com_item_cnt'] = t_com_item_cnt
feat = feat[ ['road_item_cnt','item_cnt','com_item_cij','com_item_cnt' ] ]
st2 = time.time()
print(st2-st1)
return feat
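# feat_u2i_*: decay weights between the query position/time and the road item position/time,
# using the same exponential location decay and linear time decay as the i2i features
# (floored at 0.1 here), in absolute and signed variants.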
def feat_u2i_abs_loc_weights_loc_base(data):
df = data.copy()
feat = df[ ['road_item','item','query_item_loc','query_item_time','road_item_loc','road_item_time'] ]
vals = feat[ ['query_item_loc','road_item_loc'] ].values
loc_bases = [0.1,0.3,0.5,0.7,0.9]
for loc_base in loc_bases:
result = []
for val in vals:
loc1 = val[0]
loc2 = val[1]
if loc2 >= loc1:
loc_diff = loc2-loc1
else:
loc_diff = loc1-loc2-1
loc_weight = loc_base**loc_diff
if loc_weight<=0.1:
loc_weight = 0.1
result.append(loc_weight)
feat['u2i_abs_loc_weights_loc_base_'+str(loc_base)] = result
cols = []
for loc_base in loc_bases:
cols.append( 'u2i_abs_loc_weights_loc_base_'+str(loc_base) )
feat = feat[ cols ]
return feat
def feat_u2i_loc_weights_loc_base(data):
df = data.copy()
feat = df[ ['road_item','item','query_item_loc','query_item_time','road_item_loc','road_item_time'] ]
vals = feat[ ['query_item_loc','road_item_loc'] ].values
loc_bases = [0.1,0.3,0.5,0.7,0.9]
for loc_base in loc_bases:
result = []
for val in vals:
loc1 = val[0]
loc2 = val[1]
if loc2 >= loc1:
loc_diff = loc2-loc1
else:
loc_diff = loc1-loc2-1
loc_weight = loc_base**loc_diff
if abs(loc_weight)<=0.1:
loc_weight = 0.1
if loc2 < loc1:
loc_weight = -loc_weight
result.append(loc_weight)
feat['u2i_loc_weights_loc_base_'+str(loc_base)] = result
cols = []
for loc_base in loc_bases:
cols.append( 'u2i_loc_weights_loc_base_'+str(loc_base) )
feat = feat[ cols ]
return feat
def feat_u2i_abs_time_weights(data):
df = data.copy()
feat = df[ ['road_item','item','query_item_loc','query_item_time','road_item_loc','road_item_time'] ]
vals = feat[ ['query_item_time','road_item_time'] ].values
result = []
for val in vals:
t1 = val[0]
t2 = val[1]
time_weight = (1 - abs( t1 - t2 ) * 100)
if time_weight<=0.1:
time_weight = 0.1
result.append(time_weight)
feat['u2i_abs_time_weights'] = result
cols = [ 'u2i_abs_time_weights' ]
feat = feat[ cols ]
return feat
def feat_u2i_time_weights(data):
df = data.copy()
feat = df[ ['road_item','item','query_item_loc','query_item_time','road_item_loc','road_item_time'] ]
vals = feat[ ['query_item_time','road_item_time'] ].values
result = []
for val in vals:
t1 = val[0]
t2 = val[1]
time_weight = (1 - abs( t1 - t2 ) * 100)
if abs(time_weight)<=0.1:
time_weight = 0.1
if t1 > t2:
time_weight = -time_weight
result.append(time_weight)
feat['u2i_time_weights'] = result
cols = [ 'u2i_time_weights' ]
feat = feat[ cols ]
return feat
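# feat_automl_* count features: frequency encodings of single and crossed categorical keys
# (user, item, road_item, loc_diff, recall_type and their concatenations) within the
# candidate set.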
def feat_automl_cate_count(data):
df = data.copy()
feat = df[ ['index','road_item','item'] ]
feat['road_item-item'] = feat['road_item'].astype('str') + '-' + feat['item'].astype('str')
cate_list = [ 'road_item','item','road_item-item' ]
cols = []
for cate in cate_list:
feat[cate+'_count'] = feat[ cate ].map( feat[ cate ].value_counts() )
cols.append( cate+'_count' )
feat = feat[ cols ]
return feat
def feat_automl_user_cate_count(data):
df = data.copy()
feat = df[ ['index','user','road_item','item'] ]
feat['user-road_item'] = feat['user'].astype('str') + '-' + feat['road_item'].astype('str')
feat['user-item'] = feat['user'].astype('str') + '-' + feat['item'].astype('str')
feat['user-road_item-item'] = feat['user'].astype('str') + '-' + feat['road_item'].astype('str') + '-' + feat['item'].astype('str')
cate_list = [ 'user-road_item','user-item','user-road_item-item' ]
cols = []
for cate in cate_list:
feat[cate+'_count'] = feat[ cate ].map( feat[ cate ].value_counts() )
cols.append( cate+'_count' )
feat = feat[ cols ]
return feat
def feat_u2i_road_item_time_diff(data):
df = data.copy()
feat = df[['user','road_item_loc','road_item_time']]
feat = feat.groupby(['user','road_item_loc']).first().reset_index()
feat_group = feat.sort_values(['user','road_item_loc']).set_index(['user','road_item_loc']).groupby('user')
feat1 = feat_group['road_item_time'].diff(1)
feat2 = feat_group['road_item_time'].diff(-1)
feat1.name = 'u2i_road_item_time_diff_history'
feat2.name = 'u2i_road_item_time_diff_future'
feat = df.merge(pd.concat([feat1,feat2],axis=1),how='left',on=['user','road_item_loc'])
cols = [ 'u2i_road_item_time_diff_history', 'u2i_road_item_time_diff_future' ]
feat = feat[ cols ]
return feat
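# Text-embedding features: dot product and L2 norms of the item text vectors stored in
# item_feat_pkl (the first element of each item's feature tuple).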
def feat_road_item_text_dot(data):
df = data.copy()
df = df[ ['index','road_item','item'] ]
feat = df[ ['index'] ]
item_text = {}
item_feat = utils.load_pickle(item_feat_pkl)
for k,v in item_feat.items():
item_text[k] = v[0]
def func(ss):
item1 = ss['road_item']
item2 = ss['item']
if ( item1 in item_text ) and ( item2 in item_text ):
item1_text = item_text[item1]
item2_text = item_text[item2]
c = np.dot( item1_text, item2_text )
return c
else:
return np.nan
feat['road_item_text_dot'] = df[ ['road_item','item'] ].apply(func, axis=1)
feat = feat[ ['road_item_text_dot'] ]
return feat
def feat_road_item_text_norm2(data):
df = data.copy()
df = df[ ['index','road_item','item'] ]
feat = df[ ['index'] ]
item_text = {}
item_feat = utils.load_pickle(item_feat_pkl)
for k,v in item_feat.items():
item_text[k] = v[0]
def func1(ss):
item1 = ss['road_item']
item2 = ss['item']
if ( item1 in item_text ) and ( item2 in item_text ):
item1_text = item_text[item1]
item2_text = item_text[item2]
a = np.linalg.norm( item1_text )
b = np.linalg.norm( item2_text )
return a*b
else:
return np.nan
def func2(ss):
item1 = ss
if ( item1 in item_text ):
item1_text = item_text[item1]
a = np.linalg.norm( item1_text )
return a
else:
return np.nan
feat['road_item_text_product_norm2'] = df[ ['road_item','item'] ].apply(func1, axis=1)
feat['road_item_text_norm2'] = df['road_item'].apply(func2)
feat['item_text_norm2'] = df['item'].apply(func2)
feat = feat[ ['road_item_text_product_norm2','road_item_text_norm2','item_text_norm2'] ]
return feat
def feat_automl_cate_count_all_1(data):
df = data.copy()
categories = [ 'user','item','road_item','road_item_loc',
'query_item_loc','recall_type']
feat = df[ ['index']+categories ]
feat['loc_diff'] = df['query_item_loc']-df['road_item_loc']
categories += ['loc_diff']
n = len(categories)
cols = []
for a in range(n):
cate1 = categories[a]
feat[cate1+'_count_'] = feat[cate1].map( feat[cate1].value_counts() )
cols.append( cate1+'_count_' )
print(f'feat {cate1} done')
feat = feat[ cols ]
return feat
def feat_automl_cate_count_all_2(data):
df = data.copy()
categories = [ 'user','item','road_item','road_item_loc',
'query_item_loc','recall_type']
feat = df[ ['index']+categories ]
feat['loc_diff'] = df['query_item_loc']-df['road_item_loc']
categories += ['loc_diff']
n = len(categories)
cols = []
for a in range(n):
cate1 = categories[a]
for b in range(a+1,n):
cate2 = categories[b]
name2 = f'{cate1}_{cate2}'
feat_tmp = feat.groupby([cate1,cate2]).size()
feat_tmp.name = f'{name2}_count_'
feat = feat.merge(feat_tmp,how='left',on=[cate1,cate2])
cols.append( name2+'_count_' )
print(f'feat {feat_tmp.name} done')
feat = feat[ cols ]
return feat
def feat_automl_cate_count_all_3(data):
df = data.copy()
categories = [ 'user','item','road_item','road_item_loc',
'query_item_loc','recall_type']
feat = df[ ['index']+categories ]
feat['loc_diff'] = df['query_item_loc']-df['road_item_loc']
categories += ['loc_diff']
n = len(categories)
cols = []
for a in range(n):
cate1 = categories[a]
for b in range(a+1,n):
cate2 = categories[b]
for c in range(b+1,n):
cate3 = categories[c]
name3 = f'{cate1}_{cate2}_{cate3}'
feat_tmp = feat.groupby([cate1,cate2,cate3]).size()
feat_tmp.name = f'{name3}_count_'
feat = feat.merge(feat_tmp,how='left',on=[cate1,cate2,cate3])
cols.append( name3+'_count_' )
print(f'feat {feat_tmp.name} done')
feat = feat[ cols ]
return feat
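# Time-window popularity: for each (item, time) pair in the candidate set, count how many
# clicks on that item fall within +/- delta of the query time (before / after / around),
# based on the global click history. This first version builds item2time on the fly and is
# overridden by the redefinition below, which loads a precomputed item2time pickle.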
def feat_time_window_cate_count(data):
if mode=='valid':
all_train_data = utils.load_pickle(all_train_data_path.format(cur_stage))
else:
all_train_data = utils.load_pickle(online_all_train_data_path.format(cur_stage))
item_with_time = all_train_data[["item_id", "time"]].sort_values(["item_id", "time"])
item2time = item_with_time.groupby("item_id")["time"].agg(list).to_dict()
utils.dump_pickle(item2time, item2time_path.format(mode))
item2times = utils.load_pickle(item2time_path.format(mode))
df = data.copy()
df["item_time"] = df.set_index(["item", "time"]).index
feat = df[["item_time"]]
del df
def find_count_around_time(item_time, mode, delta):
item, t = item_time
if mode == "left":
left = t - delta
right = t
elif mode == "right":
left = t
right = t + delta
else:
left = t - delta
right = t + delta
click_times = item2times[item]
count = 0
for ts in click_times:
if ts < left:
continue
elif ts > right:
break
else:
count += 1
return count
feat["item_cnt_around_time_0.01"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="all", delta=0.01))
feat["item_cnt_before_time_0.01"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="left", delta=0.01))
feat["item_cnt_after_time_0.01"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="right", delta=0.01))
feat["item_cnt_around_time_0.02"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="all", delta=0.02))
feat["item_cnt_before_time_0.02"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="left", delta=0.02))
feat["item_cnt_after_time_0.02"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="right", delta=0.02))
feat["item_cnt_around_time_0.05"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="all", delta=0.05))
feat["item_cnt_before_time_0.05"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="left", delta=0.05))
feat["item_cnt_after_time_0.05"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="right", delta=0.05))
return feat[[
"item_cnt_around_time_0.01", "item_cnt_before_time_0.01", "item_cnt_after_time_0.01",
"item_cnt_around_time_0.02", "item_cnt_before_time_0.02", "item_cnt_after_time_0.02",
"item_cnt_around_time_0.05", "item_cnt_before_time_0.05", "item_cnt_after_time_0.05",
]]
def feat_time_window_cate_count(data):
# Run item2time.py once before building this feature.
try:
item2times = utils.load_pickle(item2time_path.format(mode, cur_stage))
except:
raise Exception("Run item2time.py once before building this feature")
df = data.copy()
df["item_time"] = df.set_index(["item", "time"]).index
feat = df[["item_time"]]
del df
def find_count_around_time(item_time, mode, delta):
item, t = item_time
if mode == "left":
left = t - delta
right = t
elif mode == "right":
left = t
right = t + delta
else:
left = t - delta
right = t + delta
click_times = item2times[item]
count = 0
for ts in click_times:
if ts < left:
continue
elif ts > right:
break
else:
count += 1
return count
feat["item_cnt_around_time_0.01"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="all", delta=0.01))
feat["item_cnt_before_time_0.01"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="left", delta=0.01))
feat["item_cnt_after_time_0.01"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="right", delta=0.01))
feat["item_cnt_around_time_0.02"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="all", delta=0.02))
feat["item_cnt_before_time_0.02"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="left", delta=0.02))
feat["item_cnt_after_time_0.02"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="right", delta=0.02))
feat["item_cnt_around_time_0.05"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="all", delta=0.05))
feat["item_cnt_before_time_0.05"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="left", delta=0.05))
feat["item_cnt_after_time_0.05"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="right", delta=0.05))
feat["item_cnt_around_time_0.07"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="all", delta=0.07))
feat["item_cnt_before_time_0.07"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="left", delta=0.07))
feat["item_cnt_after_time_0.07"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="right", delta=0.07))
feat["item_cnt_around_time_0.1"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="all", delta=0.1))
feat["item_cnt_before_time_0.1"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="left", delta=0.1))
feat["item_cnt_after_time_0.1"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="right", delta=0.1))
feat["item_cnt_around_time_0.15"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="all", delta=0.15))
feat["item_cnt_before_time_0.15"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="left", delta=0.15))
feat["item_cnt_after_time_0.15"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="right", delta=0.15))
return feat[[
"item_cnt_around_time_0.01", "item_cnt_before_time_0.01", "item_cnt_after_time_0.01",
"item_cnt_around_time_0.02", "item_cnt_before_time_0.02", "item_cnt_after_time_0.02",
"item_cnt_around_time_0.05", "item_cnt_before_time_0.05", "item_cnt_after_time_0.05",
"item_cnt_around_time_0.07", "item_cnt_before_time_0.07", "item_cnt_after_time_0.07",
"item_cnt_around_time_0.1", "item_cnt_before_time_0.1", "item_cnt_after_time_0.1",
"item_cnt_around_time_0.15", "item_cnt_before_time_0.15", "item_cnt_after_time_0.15",
]]
# Within the recall candidate set, count how many times this item appears in a time window around qtime.
# item2times is rebuilt from the recall data here; the rest of the logic is unchanged.
def item_recall_cnt_around_qtime(data):
item2times = data.groupby("item")["time"].agg(list).to_dict()
df = data.copy()
df["item_time"] = df.set_index(["item", "time"]).index
feat = df[["item_time"]]
del df
def find_count_around_time(item_time, mode, delta):
item, t = item_time
if mode == "left":
left = t - delta
right = t
elif mode == "right":
left = t
right = t + delta
else:
left = t - delta
right = t + delta
click_times = item2times[item]
count = 0
for ts in click_times:
if ts < left:
continue
elif ts > right:
break
else:
count += 1
return count
new_cols = []
new_col_name = "item_recall_cnt_{}_time_{}"
for delta in [0.01, 0.02, 0.05, 0.07, 0.1, 0.15]:
print('running delta: ', delta)
for mode in ["all", "left", "right"]:
new_col = new_col_name.format(mode, delta)
new_cols.append(new_col)
feat[new_col] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode=mode, delta=delta))
return feat[new_cols]
def feat_automl_recall_type_cate_count(data):
df = data.copy()
feat = df[ ['index','item','road_item','recall_type'] ]
feat['road_item-item'] = feat['road_item'].astype('str')+ '-' + feat['item'].astype('str')
cols = []
for cate1 in ['recall_type']:
for cate2 in ['item','road_item','road_item-item']:
name2 = f'{cate1}-{cate2}'
feat_tmp = feat.groupby([cate1,cate2]).size()
feat_tmp.name = f'{name2}_count'
feat = feat.merge(feat_tmp,how='left',on=[cate1,cate2])
cols.append( name2+'_count' )
print(f'feat {cate1} {cate2} done')
feat = feat[ cols ]
return feat
def feat_automl_loc_diff_cate_count(data):
df = data.copy()
feat = df[ ['index','item','road_item','recall_type'] ]
feat['road_item-item'] = feat['road_item'].astype('str')+ '-' + feat['item'].astype('str')
feat['loc_diff'] = df['query_item_loc']-df['road_item_loc']
cols = []
for cate1 in ['loc_diff']:
for cate2 in ['item','road_item','recall_type','road_item-item']:
name2 = f'{cate1}-{cate2}'
feat_tmp = feat.groupby([cate1,cate2]).size()
feat_tmp.name = f'{name2}_count'
feat = feat.merge(feat_tmp,how='left',on=[cate1,cate2])
cols.append( name2+'_count' )
print(f'feat {cate1} {cate2} done')
feat = feat[ cols ]
return feat
def feat_automl_user_and_recall_type_cate_count(data):
df = data.copy()
feat = df[ ['index','item','road_item','recall_type','user'] ]
feat['road_item-item'] = feat['road_item'].astype('str') + '-' + feat['item'].astype('str')
cols = []
for cate1 in ['user']:
for cate2 in ['recall_type']:
for cate3 in ['item','road_item','road_item-item']:
name3 = f'{cate1}-{cate2}-{cate3}'
feat_tmp = feat.groupby([cate1,cate2,cate3]).size()
feat_tmp.name = f'{name3}_count'
feat = feat.merge(feat_tmp,how='left',on=[cate1,cate2,cate3])
cols.append( name3+'_count' )
print(f'feat {cate1} {cate2} {cate3} done')
feat = feat[ cols ]
return feat
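# feat_i2i_cijs_topk_by_loc: per (road_item, item) pair, keep the individual weighted
# co-occurrence contributions, sort them by location gap and expose the top-3, together with
# counts and mean time gaps of adjacent co-clicks (|loc1 - loc2| == 1), split into history
# (loc1 > loc2) and future (loc1 < loc2) directions.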
def feat_i2i_cijs_topk_by_loc(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
for loc1, item in enumerate(items):
item_cnt[item] += 1
df = data.copy()
feat = df[ ['index','road_item','item'] ]
print('Loading i2i_sim_seq')
i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i_seq_{mode}_{cur_stage}.pkl') )
print('Finished i2i_sim_seq')
print('Create new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
topk = 3
loc_bases = [0.9]
for loc_base in loc_bases:
print(f'Starting {loc_base}')
result = {}
result_topk_by_loc = {}
result_history_loc_diff1_cnt = {}
result_future_loc_diff1_cnt = {}
result_history_loc_diff1_time_mean = {}
result_future_loc_diff1_time_mean = {}
for key in new_keys:
if key not in i2i_sim_seq.keys():
result[key] = np.nan
continue
result[key] = []
result_history_loc_diff1_cnt[key] = 0.0
result_future_loc_diff1_cnt[key] = 0.0
result_history_loc_diff1_time_mean[key] = 0
result_future_loc_diff1_time_mean[key] = 0
records = i2i_sim_seq[key]
if len(records)==0:
print(key)
for record in records:
loc1, loc2, t1, t2, record_len = record
if loc1-loc2>0:
if loc1-loc2==1:
result_history_loc_diff1_cnt[key] += 1
result_history_loc_diff1_time_mean[key] += (t1 - t2)
time_weight = (1 - (t1 - t2) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc1-loc2-1
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
else:
if loc2-loc1==1:
result_future_loc_diff1_cnt[key] += 1
result_future_loc_diff1_time_mean[key] += (t2 - t1)
time_weight = (1 - (t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc2-loc1-1
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
result[key].append( (loc_diff,1 * 1.0 * loc_weight * time_weight / math.log(1 + record_len)))
result_history_loc_diff1_time_mean[key] /=(result_history_loc_diff1_cnt[key]+1e-5)
result_future_loc_diff1_time_mean[key] /=(result_future_loc_diff1_cnt[key]+1e-5)
result_one = sorted(result[key],key=lambda x:x[0])
result_one_len = len(result_one)
result_topk_by_loc[key] = [x[1] for x in result_one[:topk]]+[np.nan]*max(0,topk-result_one_len)
feat['history_loc_diff1_com_item_time_mean'] = feat['new_keys'].map(result_history_loc_diff1_time_mean).fillna(0)
feat['future_loc_diff1_com_item_time_mean'] = feat['new_keys'].map(result_future_loc_diff1_time_mean).fillna(0)
feat['history_loc_diff1_com_item_cnt'] = feat['new_keys'].map(result_history_loc_diff1_cnt).fillna(0)
feat['future_loc_diff1_com_item_cnt'] = feat['new_keys'].map(result_future_loc_diff1_cnt).fillna(0)
feat_top = []
for key,value in result_topk_by_loc.items():
feat_top.append([key[0],key[1]]+value)
feat_top = pd.DataFrame(feat_top,columns=['road_item','item']+[f'i2i_cijs_top{k}_by_loc' for k in range(1,topk+1)])
feat = feat.merge(feat_top,how='left',on=['road_item','item'])
print('Finished getting result')
cols = ['history_loc_diff1_com_item_time_mean',
'future_loc_diff1_com_item_time_mean',
'history_loc_diff1_com_item_cnt',
'future_loc_diff1_com_item_cnt']+[f'i2i_cijs_top{k}_by_loc' for k in range(1,topk+1)]
feat = feat[ cols ]
return feat
def feat_i2i_cijs_median_mean_topk(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
for loc1, item in enumerate(items):
item_cnt[item] += 1
df = data.copy()
feat = df[ ['index','road_item','item'] ]
print('Loading i2i_sim_seq')
i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i_seq_{mode}_{cur_stage}.pkl') )
print('Finished i2i_sim_seq')
print('Create new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
topk = 3
loc_bases = [0.9]
for loc_base in loc_bases:
print(f'Starting {loc_base}')
result = {}
result_median = {}
result_mean = {}
result_topk = {}
for key in new_keys:
if key not in i2i_sim_seq.keys():
result[key] = np.nan
continue
result[key] = []
records = i2i_sim_seq[key]
if len(records)==0:
print(key)
for record in records:
loc1, loc2, t1, t2, record_len = record
if loc1-loc2>0:
time_weight = (1 - (t1 - t2) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc1-loc2-1
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
else:
time_weight = (1 - (t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc2-loc1-1
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
result[key].append( 1 * 1.0 * loc_weight * time_weight / math.log(1 + record_len))
result_one = sorted(result[key],reverse=True)
result_one_len = len(result_one)
result_median[key] = result_one[result_one_len//2] if result_one_len%2==1 else (result_one[result_one_len//2]+result_one[result_one_len//2-1])/2
result_mean[key] = sum(result[key])/len(result[key])
result_topk[key] = result_one[:topk]+[np.nan]*max(0,topk-result_one_len)
feat['i2i_cijs_median'] = feat['new_keys'].map(result_median)
feat['i2i_cijs_mean'] = feat['new_keys'].map(result_mean)
feat_top = []
for key,value in result_topk.items():
feat_top.append([key[0],key[1]]+value)
feat_top = pd.DataFrame(feat_top,columns=['road_item','item']+[f'i2i_cijs_top{k}_by_cij' for k in range(1,topk+1)])
feat = feat.merge(feat_top,how='left',on=['road_item','item'])
print('Finished getting result')
cols = ['i2i_cijs_median','i2i_cijs_mean']+[f'i2i_cijs_top{k}_by_cij' for k in range(1,topk+1)]
feat = feat[ cols ]
return feat
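# The next four functions repeat the per-recall-source score aggregation with different
# grouping keys: item, road_item, loc_diff and (item, recall_type).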
def feat_different_type_road_score_sum_mean_by_item(data):
df = data.copy()
feat = df[ ['user','item','index','sim_weight','recall_type'] ]
cols = ['i2i_score','blend_score','i2i2i_score']#,'i2iw10_score','i2i2b_score']
for i in range(len(cols)):
feat[cols[i]] = feat['sim_weight']
feat.loc[ feat['recall_type']!=i,cols[i] ] = np.nan
for col in cols:
df = feat[ ['item',col,'index'] ]
df = df.groupby('item')[col].sum().reset_index()
df[col+'_by_item_sum'] = df[col]
df = df[ ['item',col+'_by_item_sum'] ]
feat = pd.merge( feat, df, on='item', how='left')
df = feat[ ['item',col,'index'] ]
df = df.groupby('item')[col].mean().reset_index()
df[col+'_by_item_mean'] = df[col]
df = df[ ['item',col+'_by_item_mean'] ]
feat = pd.merge( feat, df, on='item', how='left')
feat = feat[[f'{i}_by_item_{j}' for i in cols for j in ['sum','mean']]]
return feat
def feat_different_type_road_score_mean_by_road_item(data):
df = data.copy()
feat = df[ ['user','road_item','index','sim_weight','recall_type'] ]
cols = ['i2i_score','blend_score','i2i2i_score']#'i2iw10_score','i2i2b_score']
for i in range(len(cols)):
feat[cols[i]] = feat['sim_weight']
feat.loc[ feat['recall_type']!=i,cols[i] ] = np.nan
for col in cols:
df = feat[ ['road_item',col,'index'] ]
df = df.groupby('road_item')[col].mean().reset_index()
df[col+'_by_road_item_mean'] = df[col]
df = df[ ['road_item',col+'_by_road_item_mean'] ]
feat = pd.merge( feat, df, on='road_item', how='left')
feat = feat[[f'{i}_by_road_item_mean' for i in cols]]
return feat
def feat_different_type_road_score_mean_by_loc_diff(data):
df = data.copy()
feat = df[ ['user','index','sim_weight','recall_type'] ]
feat['loc_diff'] = df['query_item_loc']-df['road_item_loc']
cols = ['i2i_score','blend_score','i2i2i_score','i2iw10_score','i2i2b_score']
for i in range(len(cols)):
feat[cols[i]] = feat['sim_weight']
feat.loc[ feat['recall_type']!=i,cols[i] ] = np.nan
for col in cols:
df = feat[ ['loc_diff',col,'index'] ]
df = df.groupby('loc_diff')[col].mean().reset_index()
df[col+'_by_loc_diff_mean'] = df[col]
df = df[ ['loc_diff',col+'_by_loc_diff_mean'] ]
feat = pd.merge( feat, df, on='loc_diff', how='left')
feat = feat[[f'{i}_by_loc_diff_mean' for i in cols]]
return feat
def feat_different_type_road_score_sum_mean_by_recall_type_and_item(data):
df = data.copy()
feat = df[ ['user','item','index','sim_weight','recall_type'] ]
cols = ['i2i_score','blend_score','i2i2i_score','i2iw10_score','i2i2b_score']
for i in range(len(cols)):
feat[cols[i]] = feat['sim_weight']
feat.loc[ feat['recall_type']!=i,cols[i] ] = np.nan
for col in cols:
df = feat[ ['item','recall_type',col,'index'] ]
df = df.groupby(['item','recall_type'])[col].sum().reset_index()
df[col+'_by_item-recall_type_sum'] = df[col]
df = df[ ['item','recall_type',col+'_by_item-recall_type_sum'] ]
feat = pd.merge( feat, df, on=['item','recall_type'], how='left')
df = feat[ ['item','recall_type',col,'index'] ]
df = df.groupby(['item','recall_type'])[col].mean().reset_index()
df[col+'_by_item-recall_type_mean'] = df[col]
df = df[ ['item','recall_type',col+'_by_item-recall_type_mean'] ]
feat = pd.merge( feat, df, on=['item','recall_type'], how='left')
feat = feat[[f'{i}_by_item-recall_type_{j}' for i in cols for j in ['sum','mean']]]
return feat
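# Stage-level features: feat_base_info_in_stage rebuilds the i2i statistics within each stage
# and joins, per (stage, item), the sum / count / mean of cij and the number of distinct
# co-clicked items; the following functions add per-stage time and user/item count statistics.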
def feat_base_info_in_stage(data):
if mode=='valid':
all_train_stage_data = utils.load_pickle(all_train_stage_data_path.format(cur_stage))
else:
all_train_stage_data = utils.load_pickle(online_all_train_stage_data_path.format(cur_stage))
#all_train_stage_data = pd.concat( all_train_stage_data.iloc[0:1000], all_train_stage_data.iloc[-10000:] )
df_train_stage = all_train_stage_data
df = data.copy()
feat = df[ ['index','road_item','item','stage'] ]
stage2sim_item = {}
stage2item_cnt = {}
stage2com_item_cnt = {}
for sta in range(cur_stage+1):
df_train = df_train_stage[ df_train_stage['stage']==sta ]
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
sim_item = {}
item_cnt = defaultdict(int)
com_item_cnt = {}
for user, items in user_item_dict.items():
times = user_time_dict[user]
for loc1, item in enumerate(items):
item_cnt[item] += 1
sim_item.setdefault(item, {})
com_item_cnt.setdefault(item, {})
for loc2, relate_item in enumerate(items):
if item == relate_item:
continue
t1 = times[loc1]
t2 = times[loc2]
sim_item[item].setdefault(relate_item, 0)
com_item_cnt[item].setdefault(relate_item, 0)
if loc1-loc2>0:
time_weight = (1 - (t1 - t2) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc1-loc2-1
loc_weight = (0.9**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
sim_item[item][relate_item] += 1 * 1.0 * loc_weight * time_weight / math.log(1 + len(items))
else:
time_weight = (1 - (t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc2-loc1-1
loc_weight = (0.9**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
sim_item[item][relate_item] += 1 * 1.0 * loc_weight * time_weight / math.log(1 + len(items))
com_item_cnt[item][relate_item] += 1.0
stage2sim_item[sta] = sim_item
stage2item_cnt[sta] = item_cnt
stage2com_item_cnt[sta] = com_item_cnt
sta_list = []
itemb_list = []
sum_sim_list = []
count_sim_list = []
mean_sim_list = []
nunique_itema_count_list = []
for sta in range(cur_stage+1):
for key1 in stage2sim_item[sta].keys():
val = 0
count = 0
for key2 in stage2sim_item[sta][key1].keys():
val += stage2sim_item[sta][key1][key2]
count += stage2com_item_cnt[sta][key1][key2]
sta_list.append( sta )
itemb_list.append( key1 )
sum_sim_list.append( val )
count_sim_list.append( count )
mean_sim_list.append( val/count )
nunique_itema_count_list.append( len( stage2sim_item[sta][key1].keys() ) )
data1 = pd.DataFrame( {'stage':sta_list, 'item':itemb_list, 'sum_sim_in_stage':sum_sim_list, 'count_sim_in_stage':count_sim_list,
'mean_sim_in_stage':mean_sim_list, 'nunique_itema_count_in_stage':nunique_itema_count_list } )
'''
sta_list = []
item_list = []
cnt_list = []
for sta in range(cur_stage+1):
for key1 in stage2item_cnt[sta].keys():
sta_list.append(sta)
item_list.append(key1)
cnt_list.append( stage2item_cnt[sta][key1] )
data2 = pd.DataFrame( {'stage':sta_list, 'road_item':item_list, 'stage_road_item_cnt':cnt_list } )
data3 = pd.DataFrame( {'stage':sta_list, 'item':item_list, 'stage_item_cnt':cnt_list } )
'''
#feat = pd.merge( feat,data1, how='left',on=['stage','road_item','item'] )
#feat = pd.merge( feat,data2, how='left',on=['stage','road_item'] )
feat = pd.merge( feat,data1, how='left',on=['stage','item'] )
feat = feat[ ['sum_sim_in_stage','count_sim_in_stage','mean_sim_in_stage','nunique_itema_count_in_stage'] ]
return feat
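# Per (stage, item) click-time statistics (max / min / mean and their span) plus the offsets
# of the current row's time from that stage min and max.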
def feat_item_time_info_in_stage(data):
df = data.copy()
feat = df[ ['index','item','stage','time'] ]
if mode=='valid':
all_train_stage_data = utils.load_pickle(all_train_stage_data_path.format(cur_stage))
else:
all_train_stage_data = utils.load_pickle(online_all_train_stage_data_path.format(cur_stage))
df_train_stage = all_train_stage_data
data1 = df_train_stage.groupby( ['stage','item_id'] )['time'].agg( ['max','min','mean'] ).reset_index()
data1.columns = [ 'stage','item','time_max_in_stage','time_min_in_stage','time_mean_in_stage' ]
data1['time_dura_in_stage'] = data1['time_max_in_stage'] - data1['time_min_in_stage']
feat = pd.merge( feat,data1, how='left',on=['stage','item'] )
feat['time_diff_min_in_stage'] = feat['time'] - feat['time_min_in_stage']
feat['time_diff_max_in_stage'] = feat['time_max_in_stage'] - feat['time']
cols = [ 'time_dura_in_stage','time_max_in_stage','time_min_in_stage','time_mean_in_stage','time_diff_min_in_stage','time_diff_max_in_stage' ]
feat = feat[ cols ]
return feat
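# Stage-level activity counts: clicks per (stage, user), distinct users per (stage, item),
# and the clicks-per-distinct-user ratio of each (stage, item).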
def feat_user_info_in_stage(data):
df = data.copy()
feat = df[ ['index','item','user','stage'] ]
if mode=='valid':
all_train_stage_data = utils.load_pickle(all_train_stage_data_path.format(cur_stage))
else:
all_train_stage_data = utils.load_pickle(online_all_train_stage_data_path.format(cur_stage))
df_train_stage = all_train_stage_data
data1 = df_train_stage.groupby( ['stage','user_id'] )['index'].count()
data1.name = 'user_count_in_stage'
data1 = data1.reset_index()
data1 = data1.rename( columns={'user_id':'user'} )
data2 = df_train_stage.groupby( ['stage','item_id'] )['user_id'].nunique()
data2.name = 'item_nunique_in_stage'
data2 = data2.reset_index()
data2 = data2.rename( columns={'item_id':'item'} )
data3 = df_train_stage.groupby( ['stage','item_id'] )['user_id'].count()
data3.name = 'item_count_in_stage'
data3 = data3.reset_index()
data3 = data3.rename( columns={'item_id':'item'} )
data3[ 'item_ratio_in_stage' ] = data3[ 'item_count_in_stage' ] / data2['item_nunique_in_stage']
feat = pd.merge( feat,data1, how='left',on=['stage','user'] )
feat = pd.merge( feat,data2, how='left',on=['stage','item'] )
feat = pd.merge( feat,data3, how='left',on=['stage','item'] )
cols = [ 'user_count_in_stage','item_nunique_in_stage','item_ratio_in_stage' ]
feat = feat[ cols ]
return feat
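# Map each (road_item, stage) pair to the number of clicks that item received in that stage.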
def feat_item_com_cnt_in_stage(data):
if mode=='valid':
all_train_stage_data = utils.load_pickle(all_train_stage_data_path.format(cur_stage))
else:
all_train_stage_data = utils.load_pickle(online_all_train_stage_data_path.format(cur_stage))
item_stage_cnt = all_train_stage_data.groupby(["item_id"])["stage"].value_counts().to_dict()
feat = data[["road_item", "stage"]]
feat["head"] = feat.set_index(["road_item", "stage"]).index
feat["itema_cnt_in_stage"] = feat["head"].map(item_stage_cnt)
return feat[["itema_cnt_in_stage"]]
def item_cnt_in_stage2(data):
if mode=='valid':
all_train_stage_data = utils.load_pickle(all_train_stage_data_path.format(cur_stage))
else:
all_train_stage_data = utils.load_pickle(online_all_train_stage_data_path.format(cur_stage))
item_stage_cnt = all_train_stage_data.groupby(["item_id"])["stage"].value_counts().to_dict()
feat = data[["item", "stage"]]
feat["head"] = feat.set_index(["item", "stage"]).index
feat["item_stage_cnt"] = feat["head"].map(item_stage_cnt)
return feat[["item_stage_cnt"]]
def feat_item_cnt_in_different_stage(data):
if mode=='valid':
all_train_stage_data = utils.load_pickle(all_train_stage_data_path.format(cur_stage))
else:
all_train_stage_data = utils.load_pickle(online_all_train_stage_data_path.format(cur_stage))
feat = data[["item"]]
cols = []
for sta in range(cur_stage+1):
train_stage_data = all_train_stage_data[ all_train_stage_data['stage']==sta ]
item_stage_cnt = train_stage_data.groupby(['item_id'])['index'].count()
item_stage_cnt.name = f"item_stage_cnt_{sta}"
item_stage_cnt = item_stage_cnt.reset_index()
item_stage_cnt.columns = ['item',f"item_stage_cnt_{sta}"]
feat = pd.merge( feat,item_stage_cnt,how='left',on='item' )
cols.append( f"item_stage_cnt_{sta}" )
return feat[ cols ]
def feat_user_cnt_in_different_stage(data):
if mode=='valid':
all_train_stage_data = utils.load_pickle(all_train_stage_data_path.format(cur_stage))
else:
all_train_stage_data = utils.load_pickle(online_all_train_stage_data_path.format(cur_stage))
feat = data[["user"]]
cols = []
for sta in range(cur_stage+1):
train_stage_data = all_train_stage_data[ all_train_stage_data['stage']==sta ]
user_stage_cnt = train_stage_data.groupby(['user_id'])['index'].count()
user_stage_cnt.name = f"user_stage_cnt_{sta}"
user_stage_cnt = user_stage_cnt.reset_index()
user_stage_cnt.columns = ['user',f"user_stage_cnt_{sta}"]
feat = pd.merge( feat,user_stage_cnt,how='left',on='user' )
cols.append( f"user_stage_cnt_{sta}" )
return feat[ cols ]
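# Click counts at three granularities: per (stage, item) / (stage, user) from the stage log,
# per item / user over the whole stage log, and per item / user from the plain training log.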
def feat_user_and_item_count_in_three_init_data(data):
df = data.copy()
feat = df[ ['index','item','user','stage'] ]
if mode=='valid':
df_train_stage = utils.load_pickle(all_train_stage_data_path.format(cur_stage))
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
else:
df_train_stage = utils.load_pickle(online_all_train_stage_data_path.format(cur_stage))
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
data1 = df_train_stage.groupby( ['stage','item_id'] )['index'].count()
data1.name = 'in_stage_item_count'
data1 = data1.reset_index()
data1 = data1.rename( columns = {'item_id':'item'} )
data2 = df_train_stage.groupby( ['stage','user_id'] )['index'].count()
data2.name = 'in_stage_user_count'
data2 = data2.reset_index()
data2 = data2.rename( columns = {'user_id':'user'} )
data3 = df_train_stage.groupby( ['item_id'] )['index'].count()
data3.name = 'no_in_stage_item_count'
data3 = data3.reset_index()
data3 = data3.rename( columns = {'item_id':'item'} )
data4 = df_train_stage.groupby( ['user_id'] )['index'].count()
data4.name = 'no_in_stage_user_count'
data4 = data4.reset_index()
data4 = data4.rename( columns = {'user_id':'user'} )
data5 = df_train.groupby( ['item_id'] )['index'].count()
data5.name = 'no_stage_item_count'
data5 = data5.reset_index()
data5 = data5.rename( columns = {'item_id':'item'} )
data6 = df_train.groupby( ['user_id'] )['index'].count()
data6.name = 'no_stage_user_count'
data6 = data6.reset_index()
data6 = data6.rename( columns = {'user_id':'user'} )
feat = pd.merge( feat,data1,how='left',on=['stage','item'] )
feat = pd.merge( feat,data2,how='left',on=['stage','user'] )
feat = pd.merge( feat,data3,how='left',on=['item'] )
feat = pd.merge( feat,data4,how='left',on=['user'] )
feat = pd.merge( feat,data5,how='left',on=['item'] )
feat = pd.merge( feat,data6,how='left',on=['user'] )
cols = [ 'in_stage_item_count','in_stage_user_count','no_in_stage_item_count','no_in_stage_user_count','no_stage_item_count','no_stage_user_count' ]
return feat[ cols ]
#def feat_item_count_in_three_init_data(data):
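# Aggregate the precomputed two-hop (i2i2i) path records per (road_item, item) pair:
# path count, mean product of the two hop scores for both score variants, and mean
# popularity of the intermediate item.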
def feat_i2i2i_sim(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
df = data.copy()
feat = df[ ['index','road_item','item'] ]
print('Loading i2i2i_sim_seq')
i2i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i2i_seq_{mode}_{cur_stage}.pkl') )
print('Finished i2i2i_sim_seq')
    print('Create new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
result = np.zeros((len(new_keys),4))
item_cnt = df_train['item_id'].value_counts().to_dict()
for i in range(len(new_keys)):
key = new_keys[i]
if key not in i2i2i_sim_seq.keys():
continue
records = i2i2i_sim_seq[key]
result[i,0] = len(records)
if len(records)==0:
print(key)
for record in records:
item,score1_1,score1_2,score2_1,score2_2 = record
result[i,1] += score1_1*score1_2
result[i,2] += score2_1*score2_2
result[i,3] += item_cnt[item]
    # normalize the path-level sums into per-pair means (divide by each row's path count)
    result[:,1] /= (result[:,0]+1e-9)
    result[:,2] /= (result[:,0]+1e-9)
    result[:,3] /= (result[:,0]+1e-9)
print('Finished getting result')
cols = ['i2i2i_road_cnt','i2i2i_score1_mean','i2i2i_score2_mean','i2i2i_middle_item_cnt_mean']
result = pd.DataFrame(result,index=new_keys,columns=cols)
result = result.reset_index()
result.rename(columns={'index':'new_keys'},inplace=True)
feat = feat.merge(result,how='left',on='new_keys')
feat = feat[ cols ]
return feat
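# Same aggregation as feat_i2i2i_sim, applied to the cached i2i2b path records.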
def feat_i2i2b_sim(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
df = data.copy()
feat = df[ ['index','road_item','item'] ]
print('Loading i2i2b_sim_seq')
i2i2b_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i2b_seq_{mode}_{cur_stage}.pkl') )
print('Finished i2i2b_sim_seq')
    print('Create new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
result = np.zeros((len(new_keys),4))
item_cnt = df_train['item_id'].value_counts().to_dict()
for i in range(len(new_keys)):
key = new_keys[i]
if key not in i2i2b_sim_seq.keys():
continue
records = i2i2b_sim_seq[key]
result[i,0] = len(records)
if len(records)==0:
print(key)
for record in records:
item,score1_1,score1_2,score2_1,score2_2 = record
result[i,1] += score1_1*score1_2
result[i,2] += score2_1*score2_2
result[i,3] += item_cnt[item]
    # normalize the path-level sums into per-pair means (divide by each row's path count)
    result[:,1] /= (result[:,0]+1e-9)
    result[:,2] /= (result[:,0]+1e-9)
    result[:,3] /= (result[:,0]+1e-9)
print('Finished getting result')
cols = ['i2i2b_road_cnt','i2i2b_score1_mean','i2i2b_score2_mean','i2i2b_middle_item_cnt_mean']
result = pd.DataFrame(result,index=new_keys,columns=cols)
result = result.reset_index()
result.rename(columns={'index':'new_keys'},inplace=True)
feat = feat.merge(result,how='left',on='new_keys')
feat = feat[ cols ]
return feat
def feat_numerical_groupby_item_cnt_in_stage(data):
df = data.copy()
num_cols = [ 'sim_weight', 'loc_weight', 'time_weight', 'rank_weight' ]
cate_col = 'item_stage_cnt'
feat = df[ ['index','road_item','item'] ]
feat1 = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'item_cnt_in_stage2_{mode}_{cur_stage}.pkl') )
df[ cate_col ] = feat1[ cate_col ]
feat[ cate_col ] = feat1[ cate_col ]
cols = []
for col in num_cols:
t = df.groupby(cate_col)[col].agg( ['mean','max','min'] )
cols += [ f'{col}_{i}_groupby_{cate_col}' for i in ['mean','max','min'] ]
t.columns = [ f'{col}_{i}_groupby_{cate_col}' for i in ['mean','max','min'] ]
t = t.reset_index()
feat = pd.merge( feat, t, how='left', on=cate_col )
return feat[ cols ]
def feat_item_stage_nunique(data):
if mode=='valid':
all_train_stage_data = utils.load_pickle(all_train_stage_data_path.format(cur_stage))
else:
all_train_stage_data = utils.load_pickle(online_all_train_stage_data_path.format(cur_stage))
item_stage_nunique = all_train_stage_data.groupby(["item_id"])["stage"].nunique()
feat = data[["item"]]
feat["item_stage_nunique"] = feat["item"].map(item_stage_nunique)
return feat[["item_stage_nunique"]]
def feat_item_qtime_time_diff(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
item_time_list = df_train.sort_values('time').groupby('item_id',sort=False)['time'].agg(list)
df = data.copy()
feat = df[['item','query_item_time']]
df_v = feat.values
result_history = np.zeros(df_v.shape[0])*np.nan
result_future = np.zeros(df_v.shape[0])*np.nan
for i in range(df_v.shape[0]):
time = df_v[i,1]
time_list = [0]+item_time_list[df_v[i,0]]+[1]
for j in range(1,len(time_list)):
if time<time_list[j]:
result_future[i] = time_list[j]-time
result_history[i] = time-time_list[j-1]
break
feat['item_qtime_time_diff_history'] = result_history
feat['item_qtime_time_diff_future'] = result_future
return feat[['item_qtime_time_diff_history','item_qtime_time_diff_future']]
def feat_item_cumcount(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
item_time_list = df_train.sort_values('time').groupby('item_id',sort=False)['time'].agg(list)
df = data.copy()
feat = df[['item','query_item_time']]
df_v = feat.values
result = np.zeros(df_v.shape[0])
for i in range(df_v.shape[0]):
time = df_v[i,1]
time_list = item_time_list[df_v[i,0]]+[1]
for j in range(len(time_list)):
if time<time_list[j]:
result[i] = j
break
feat['item_cumcount'] = result
feat['item_cumrate'] = feat['item_cumcount']/feat['item'].map(df_train['item_id'].value_counts()).fillna(1e-5)
return feat[['item_cumcount','item_cumrate']]
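# Bucket road_item_time into 100 equal-width bins and count, for each categorical key
# (item, road_item, user, recall_type, loc_diff), how many rows share its bin.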
def feat_road_time_bins_cate_cnt(data):
df = data.copy()
categoricals = ['item','road_item','user','recall_type']
feat = df[['road_item_time']+categoricals]
feat['loc_diff'] = df['query_item_loc']-df['road_item_loc']
categoricals.append('loc_diff')
feat['road_time_bins'] = pd.Categorical(pd.cut(feat['road_item_time'],100)).codes
cols = []
for cate in categoricals:
cnt = feat.groupby([cate,'road_time_bins']).size()
cnt.name = f'{cate}_cnt_by_road_time_bins'
cols.append(cnt.name)
feat = feat.merge(cnt,how='left',on=[cate,'road_time_bins'])
return feat[cols]
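# Count the candidate item's clicks before / after / around the query time for symmetric
# windows from +-0.01 up to +-0.15.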
def feat_time_window_cate_count(data):
    # Before building this feature, run item2time.py once to produce the item2time pickle.
import time as ti
t = ti.time()
df = data.copy()
feat = df[['item','query_item_time']]
df_v = feat.values
del df
try:
item_time_list = utils.load_pickle(item2time_path.format(mode, cur_stage))
except:
raise Exception("做这个特征之前,先做一次item2time.py")
delta_list = np.array(sorted([0.01, 0.02, 0.05, 0.07, 0.1, 0.15]))
delta_list2 = delta_list[::-1]
delta_n = delta_list.shape[0]
n = delta_n*2+1
result_tmp = np.zeros((df_v.shape[0],n))
result_equal = np.zeros(df_v.shape[0])
for i in range(df_v.shape[0]):
time = np.ones(n)*df_v[i,1]
time[:delta_n] -= delta_list2
time[-delta_n:] += delta_list
time_list = item_time_list[df_v[i,0]]+[10]
k = 0
for j in range(len(time_list)):
while k<n and time[k]<time_list[j] :
result_tmp[i,k] = j
k += 1
if time[delta_n]==time_list[j]:
result_equal[i] += 1
result_tmp[i,k:] = j
if i%100000 == 0:
print(f'[{i}/{df_v.shape[0]}]:time {ti.time()-t:.3f}s')
t = ti.time()
result = np.zeros((df_v.shape[0],delta_n*3))
for i in range(delta_n):
result[:,i*3+0] = result_tmp[:,delta_n] - result_tmp[:,i]
result[:,i*3+1] = result_tmp[:,-(i+1)] - result_tmp[:,delta_n] + result_equal
result[:,i*3+2] = result_tmp[:,-(i+1)] - result_tmp[:,i]
cols = [f'item_cnt_{j}_time_{i}' for i in delta_list2 for j in ['before','after','around']]
result = pd.DataFrame(result,columns=cols)
result = result[[
"item_cnt_around_time_0.01", "item_cnt_before_time_0.01", "item_cnt_after_time_0.01",
"item_cnt_around_time_0.02", "item_cnt_before_time_0.02", "item_cnt_after_time_0.02",
"item_cnt_around_time_0.05", "item_cnt_before_time_0.05", "item_cnt_after_time_0.05",
"item_cnt_around_time_0.07", "item_cnt_before_time_0.07", "item_cnt_after_time_0.07",
"item_cnt_around_time_0.1", "item_cnt_before_time_0.1", "item_cnt_after_time_0.1",
"item_cnt_around_time_0.15", "item_cnt_before_time_0.15", "item_cnt_after_time_0.15",
]]
return result
def feat_road_item_text_norm2(data):
    df = data.copy()
feat = df[ ['index','road_item','item'] ]
item_text = {}
item_feat = utils.load_pickle(item_feat_pkl)
item_n = max(item_feat.keys())+1
item_np = np.zeros((item_n,128))
for k,v in item_feat.items():
item_np[k,:] = v[0]
item_l2 = np.linalg.norm(item_np,axis=1)
n = feat.shape[0]
result = np.zeros((n,3))
result[:,1] = item_l2[feat['road_item']]
result[:,2] = item_l2[feat['item']]
result[:,0] = result[:,1]*result[:,2]
feat['road_item_text_product_norm2'] = result[:,0]
feat['road_item_text_norm2'] = result[:,1]
feat['item_text_norm2'] = result[:,2]
feat.loc[(~feat['item'].isin(item_feat.keys()))|(~feat['road_item'].isin(item_feat.keys())),'road_item_text_product_norm2'] = np.nan
feat.loc[(~feat['road_item'].isin(item_feat.keys())),'road_item_text_norm2'] = np.nan
feat.loc[(~feat['item'].isin(item_feat.keys())),'item_text_norm2'] = np.nan
feat = feat[ ['road_item_text_product_norm2','road_item_text_norm2','item_text_norm2'] ]
return feat
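# Batched cosine similarity between the 128-d text embeddings of road_item and item;
# pairs with a missing embedding are set to NaN (the Euclidean-distance, dot-product and
# norm variants below follow the same pattern).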
def feat_road_item_text_cossim(data):
df = data.copy()
feat = df[ ['index','road_item','item'] ]
item_text = {}
item_feat = utils.load_pickle(item_feat_pkl)
item_n = 120000
item_np = np.zeros((item_n,128))
for k,v in item_feat.items():
item_np[k,:] = v[0]
item_l2 = np.linalg.norm(item_np,axis=1)
n = feat.shape[0]
result = np.zeros(n)
batch_size = 100000
batch_num = n//batch_size if n%batch_size==0 else n//batch_size+1
for i in range(batch_num):
result[i*batch_size:(i+1)*batch_size] = np.multiply(item_np[feat['road_item'][i*batch_size:(i+1)*batch_size],:],item_np[feat['item'][i*batch_size:(i+1)*batch_size],:]).sum(axis=1)
result = np.divide(result,item_l2[feat['road_item']]*item_l2[feat['item']]+1e-9)
feat['road_item_text_cossim'] = result
feat.loc[(~feat['item'].isin(item_feat.keys()))|(~feat['road_item'].isin(item_feat.keys())),'road_item_text_cossim'] = np.nan
return feat[['road_item_text_cossim']]
def feat_road_item_text_eulasim(data):
df = data.copy()
feat = df[ ['index','road_item','item'] ]
item_text = {}
item_feat = utils.load_pickle(item_feat_pkl)
item_n = 120000
item_np = np.zeros((item_n,128))
for k,v in item_feat.items():
item_np[k,:] = v[0]
n = feat.shape[0]
result = np.zeros(n)
batch_size = 100000
batch_num = n//batch_size if n%batch_size==0 else n//batch_size+1
for i in range(batch_num):
result[i*batch_size:(i+1)*batch_size] = np.linalg.norm(item_np[feat['road_item'][i*batch_size:(i+1)*batch_size],:]-item_np[feat['item'][i*batch_size:(i+1)*batch_size],:],axis=1)
feat['road_item_text_eulasim'] = result
feat.loc[(~feat['item'].isin(item_feat.keys()))|(~feat['road_item'].isin(item_feat.keys())),'road_item_text_eulasim'] = np.nan
return feat[['road_item_text_eulasim']]
def feat_road_item_text_dot(data):
df = data.copy()
feat = df[ ['index','road_item','item'] ]
item_text = {}
item_feat = utils.load_pickle(item_feat_pkl)
item_n = 120000
item_np = np.zeros((item_n,128))
for k,v in item_feat.items():
item_np[k,:] = v[0]
item_l2 = np.linalg.norm(item_np,axis=1)
n = feat.shape[0]
result = np.zeros(n)
batch_size = 100000
batch_num = n//batch_size if n%batch_size==0 else n//batch_size+1
for i in range(batch_num):
result[i*batch_size:(i+1)*batch_size] = np.multiply(item_np[feat['road_item'][i*batch_size:(i+1)*batch_size],:],item_np[feat['item'][i*batch_size:(i+1)*batch_size],:]).sum(axis=1)
feat['road_item_text_dot'] = result
feat.loc[(~feat['item'].isin(item_feat.keys()))|(~feat['road_item'].isin(item_feat.keys())),'road_item_text_dot'] = np.nan
return feat[['road_item_text_dot']]
def feat_road_item_text_norm2(data):
df = data.copy()
feat = df[ ['index','road_item','item'] ]
item_text = {}
item_feat = utils.load_pickle(item_feat_pkl)
item_n = 120000
item_np = np.zeros((item_n,128))
for k,v in item_feat.items():
item_np[k,:] = v[0]
item_l2 = np.linalg.norm(item_np,axis=1)
n = feat.shape[0]
result = np.zeros((n,3))
result[:,1] = item_l2[feat['road_item']]
result[:,2] = item_l2[feat['item']]
result[:,0] = result[:,1]*result[:,2]
feat['road_item_text_product_norm2'] = result[:,0]
feat['road_item_text_norm2'] = result[:,1]
feat['item_text_norm2'] = result[:,2]
feat.loc[(~feat['item'].isin(item_feat.keys()))|(~feat['road_item'].isin(item_feat.keys())),'road_item_text_product_norm2'] = np.nan
feat.loc[(~feat['road_item'].isin(item_feat.keys())),'road_item_text_norm2'] = np.nan
feat.loc[(~feat['item'].isin(item_feat.keys())),'item_text_norm2'] = np.nan
feat = feat[ ['road_item_text_product_norm2','road_item_text_norm2','item_text_norm2'] ]
return feat
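# From the cached i2i co-click records of each (road_item, item) pair: counts and mean time
# gaps of directly adjacent co-clicks (loc diff of 1) in both directions, plus the top-k
# weighted cij contributions ordered by location difference.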
def feat_i2i_cijs_topk_by_loc(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
for loc1, item in enumerate(items):
item_cnt[item] += 1
df = data.copy()
feat = df[ ['index','road_item','item'] ]
print('Loading i2i_sim_seq')
i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i_seq_{mode}_{cur_stage}.pkl') )
print('Finished i2i_sim_seq')
    print('Create new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
topk = 3
loc_base = 0.9
print(f'Starting {loc_base}')
result = np.zeros((len(new_keys),4+topk))
for i in range(len(new_keys)):
key = new_keys[i]
if key not in i2i_sim_seq.keys():
result[i,:] = np.nan
continue
records = i2i_sim_seq[key]
if len(records)==0:
print(key)
result_one = []
for record in records:
loc1, loc2, t1, t2, record_len = record
if loc1-loc2>0:
if loc1-loc2==1:
result[i,2] += 1
result[i,0] += (t1 - t2)
time_weight = (1 - (t1 - t2) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc1-loc2-1
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
else:
if loc2-loc1==1:
result[i,3] += 1
result[i,1] += (t2 - t1)
time_weight = (1 - (t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc2-loc1-1
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
result_one.append( (loc_diff,1 * 1.0 * loc_weight * time_weight / math.log(1 + record_len)) )
result[i,1]/=(result[i,3]+1e-5)
result[i,0]/=(result[i,2]+1e-5)
result_one = sorted(result_one,key=lambda x:x[0])
result_one_len = len(result_one)
result[i,4:] = [x[1] for x in result_one[:topk]]+[np.nan]*max(0,topk-result_one_len)
cols = ['history_loc_diff1_com_item_time_mean',
'future_loc_diff1_com_item_time_mean',
'history_loc_diff1_com_item_cnt',
'future_loc_diff1_com_item_cnt']+[f'i2i_cijs_top{k}_by_loc' for k in range(1,topk+1)]
result = pd.DataFrame(result,columns=cols,index=new_keys)
result = result.reset_index()
result.rename(columns={'index':'new_keys'},inplace=True)
feat = feat.merge(result,how='left',on='new_keys')
print('Finished getting result')
feat = feat[ cols ]
return feat
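# Median, mean and top-k of the weighted cij contributions (loc/time-decayed, normalized by
# log sequence length) over all cached co-click records of each pair.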
def feat_i2i_cijs_median_mean_topk(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
for loc1, item in enumerate(items):
item_cnt[item] += 1
df = data.copy()
feat = df[ ['index','road_item','item'] ]
print('Loading i2i_sim_seq')
i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i_seq_{mode}_{cur_stage}.pkl') )
print('Finished i2i_sim_seq')
    print('Create new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
topk = 3
loc_base = 0.9
print(f'Starting {loc_base}')
#median,mean,topk
result = np.zeros((len(new_keys),2+topk))
for i in range(len(new_keys)):
key = new_keys[i]
if key not in i2i_sim_seq.keys():
result[i,:] = np.nan
continue
records = i2i_sim_seq[key]
if len(records)==0:
print(key)
result_one = []
for record in records:
loc1, loc2, t1, t2, record_len = record
if loc1-loc2>0:
time_weight = (1 - (t1 - t2) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc1-loc2-1
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
else:
time_weight = (1 - (t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc2-loc1-1
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
result_one.append( 1 * 1.0 * loc_weight * time_weight / math.log(1 + record_len))
result_one = sorted(result_one,reverse=True)
result_one_len = len(result_one)
result[i,0] = result_one[result_one_len//2] if result_one_len%2==1 else (result_one[result_one_len//2]+result_one[result_one_len//2-1])/2
result[i,1] = sum(result_one)/(len(result_one))
result[i,2:] = result_one[:topk]+[np.nan]*max(0,topk-result_one_len)
cols = ['i2i_cijs_median','i2i_cijs_mean']+[f'i2i_cijs_top{k}_by_cij' for k in range(1,topk+1)]
result = pd.DataFrame(result,columns=cols,index=new_keys)
result = result.reset_index()
result.rename(columns={'index':'new_keys'},inplace=True)
feat = feat.merge(result,how='left',on='new_keys')
print('Finished getting result')
feat = feat[ cols ]
return feat
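# Per (user, item): the row's recall-type-specific score plus the sum and mean of that score
# over all recall roads of the same pair (i2i, blend and i2i2i types only).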
def feat_different_type_road_score_sum_mean(data):
df = data.copy()
feat = df[ ['user','item','index','sim_weight','recall_type'] ]
feat['i2i_score'] = feat['sim_weight']
feat['blend_score'] = feat['sim_weight']
feat['i2i2i_score'] = feat['sim_weight']
feat.loc[ feat['recall_type']!=0 , 'i2i_score'] = np.nan
feat.loc[ feat['recall_type']!=1 , 'blend_score'] = np.nan
feat.loc[ feat['recall_type']!=2 , 'i2i2i_score'] = np.nan
df = feat[ ['index','user','item','i2i_score','blend_score','i2i2i_score'] ]
df = df.groupby( ['user','item'] )[ ['i2i_score','blend_score','i2i2i_score'] ].agg( ['sum','mean'] ).reset_index()
df.columns = ['user','item'] + [ f'{i}_{j}' for i in ['i2i_score','blend_score','i2i2i_score'] for j in ['sum','mean'] ]
feat = pd.merge( feat, df, on=['user','item'], how='left')
feat = feat[ ['i2i_score','i2i_score_sum','i2i_score_mean',
'blend_score','blend_score_sum','blend_score_mean',
'i2i2i_score','i2i2i_score_sum','i2i2i_score_mean',] ]
return feat
def feat_automl_recall_type_cate_count(data):
df = data.copy()
feat = df[ ['index','item','road_item','recall_type'] ]
cols = []
for cate1 in ['recall_type']:
for cate2 in ['item','road_item']:
name2 = f'{cate1}-{cate2}'
feat_tmp = feat.groupby([cate1,cate2]).size()
feat_tmp.name = f'{name2}_count'
feat = feat.merge(feat_tmp,how='left',on=[cate1,cate2])
cols.append( name2+'_count' )
            print(f'feat {cate1} {cate2} done')
tmp = feat.groupby(['recall_type','road_item','item']).size()
tmp.name = 'recall_type-road_item-item_count'
feat = feat.merge(tmp,how='left',on=['recall_type','road_item','item'])
cols.append(tmp.name)
    print('feat recall_type road_item item done')
feat = feat[ cols ]
return feat
def feat_automl_loc_diff_cate_count(data):
df = data.copy()
feat = df[ ['index','item','road_item','recall_type'] ]
feat['loc_diff'] = df['query_item_loc']-df['road_item_loc']
cols = []
for cate1 in ['loc_diff']:
for cate2 in ['item','road_item','recall_type']:
name2 = f'{cate1}-{cate2}'
feat_tmp = feat.groupby([cate1,cate2]).size()
feat_tmp.name = f'{name2}_count'
feat = feat.merge(feat_tmp,how='left',on=[cate1,cate2])
cols.append( name2+'_count' )
            print(f'feat {cate1} {cate2} done')
tmp = feat.groupby(['loc_diff','road_item','item']).size()
tmp.name = 'loc_diff-road_item-item_count'
feat = feat.merge(tmp,how='left',on=['loc_diff','road_item','item'])
cols.append(tmp.name)
    print('feat loc_diff road_item item done')
feat = feat[ cols ]
return feat
def feat_automl_user_and_recall_type_cate_count(data):
df = data.copy()
feat = df[ ['index','item','road_item','recall_type','user'] ]
cols = []
for cate1 in ['user']:
for cate2 in ['recall_type']:
for cate3 in ['item','road_item']:
name3 = f'{cate1}-{cate2}-{cate3}'
feat_tmp = feat.groupby([cate1,cate2,cate3]).size()
feat_tmp.name = f'{name3}_count'
feat = feat.merge(feat_tmp,how='left',on=[cate1,cate2,cate3])
cols.append( name3+'_count' )
                print(f'feat {cate1} {cate2} {cate3} done')
tmp = feat.groupby(['user','recall_type','road_item','item']).size()
tmp.name = 'user-recall_type-road_item-item_count'
feat = feat.merge(tmp,how='left',on=['user','recall_type','road_item','item'])
cols.append(tmp.name)
    print('feat user recall_type road_item item done')
feat = feat[ cols ]
return feat
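# Re-definitions of feat_item_cumcount and feat_item_qtime_time_diff: same features as above,
# but computed by grouping queries per item and sweeping each item's sorted click times once;
# being defined later, these versions are the ones that take effect.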
def feat_item_cumcount(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
item_time_list = df_train.sort_values('time').groupby('item_id',sort=False)['time'].agg(list)
for i,v in item_time_list.items():
item_time_list[i] = np.array(v+[1])
df = data.copy()
feat = df[['index','item','query_item_time']]
tmp = feat.set_index('item')
tmp = tmp.sort_values('query_item_time')
tmp = tmp.groupby(['item']).apply(np.array)
result = np.zeros(df.shape[0])
for i,v in tmp.items():
time_list = item_time_list[i]
k = 0
item_n = v.shape[0]
for j in range(len(time_list)):
while k<item_n and v[k,1]<time_list[j]:
result[int(v[k,0])] = j
k += 1
feat['item_cumcount'] = result
feat['item_cumrate'] = feat['item_cumcount']/feat['item'].map(df_train['item_id'].value_counts()).fillna(1e-5)
return feat[['item_cumcount','item_cumrate']]
def feat_item_qtime_time_diff(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
item_time_list = df_train.sort_values('time').groupby('item_id',sort=False)['time'].agg(list)
for i,v in item_time_list.items():
item_time_list[i] = np.array([0]+v+[1])
df = data.copy()
feat = df[['index','item','query_item_time']]
tmp = feat.set_index('item')
tmp = tmp.sort_values('query_item_time')
tmp = tmp.groupby(['item']).apply(np.array)
result_history = np.zeros(df.shape[0])*np.nan
result_future = np.zeros(df.shape[0])*np.nan
for i,v in tmp.items():
time_list = item_time_list[i]
k = 0
item_n = v.shape[0]
for j in range(1,len(time_list)):
while k<item_n and v[k,1]<time_list[j]:
result_future[int(v[k,0])] = time_list[j]-v[k,1]
result_history[int(v[k,0])] = v[k,1]-time_list[j-1]
k += 1
feat['item_qtime_time_diff_history'] = result_history
feat['item_qtime_time_diff_future'] = result_future
return feat[['item_qtime_time_diff_history','item_qtime_time_diff_future']]
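# Location/time co-occurrence weights per (road_item, item) pair without the 0.2 floor used
# elsewhere; returns the unclipped sums and per-co-click means.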
def feat_sim_three_weight_no_clip(data):
df = data.copy()
feat = df[ ['index','road_item','item'] ]
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
loc_weights = {}
time_weights = {}
record_weights = {}
com_item_cnt = {}
item_set = set()
item_dict_set = {}
st0 = time.time()
for user, items in user_item_dict.items():
for item in items:
item_set.add(item)
item_dict_set[item] = set()
for user, items in user_item_dict.items():
times = user_time_dict[user]
for loc1, item in enumerate(items):
loc_weights.setdefault(item, {})
time_weights.setdefault(item, {})
record_weights.setdefault(item, {})
com_item_cnt.setdefault(item, {})
for loc2, relate_item in enumerate(items):
if item == relate_item:
continue
item_dict_set[ item ].add( relate_item )
t1 = times[loc1]
t2 = times[loc2]
loc_weights[item].setdefault(relate_item, 0)
time_weights[item].setdefault(relate_item, 0)
record_weights[item].setdefault(relate_item, 0)
com_item_cnt[item].setdefault(relate_item, 0)
if loc1-loc2>0:
time_weight = (1 - (t1 - t2) * 100)
loc_diff = loc1-loc2-1
loc_weight = (0.9**loc_diff)
else:
time_weight = (1 - (t2 - t1) * 100)
loc_diff = loc2-loc1-1
loc_weight = (0.9**loc_diff)
loc_weights[item][relate_item] += loc_weight
time_weights[item][relate_item] += time_weight
record_weights[item][relate_item] += len(items)
com_item_cnt[item][relate_item] += 1
st1 = time.time()
print(st1-st0)
print('start')
num = feat.shape[0]
road_item = feat['road_item'].values
t_item = feat['item'].values
com_item_loc_weights_sum = np.zeros( num, dtype=float )
com_item_time_weights_sum = np.zeros( num, dtype=float )
com_item_record_weights_sum = np.zeros( num, dtype=float )
t_com_item_cnt = np.zeros( num, dtype=float )
for i in range(num):
if road_item[i] in item_set:
if t_item[i] in item_dict_set[ road_item[i] ]:
com_item_loc_weights_sum[i] = loc_weights[ road_item[i] ][ t_item[i] ]
com_item_time_weights_sum[i] = time_weights[ road_item[i] ][ t_item[i] ]
com_item_record_weights_sum[i] = record_weights[ road_item[i] ][ t_item[i] ]
t_com_item_cnt[i] = com_item_cnt[ road_item[i] ][ t_item[i] ]
else:
com_item_loc_weights_sum[i] = np.nan
com_item_time_weights_sum[i] = np.nan
com_item_record_weights_sum[i] = np.nan
t_com_item_cnt[i] = np.nan
else:
com_item_loc_weights_sum[i] = np.nan
com_item_time_weights_sum[i] = np.nan
com_item_record_weights_sum[i] = np.nan
t_com_item_cnt[i] = np.nan
feat['com_item_loc_weights_sum_no_clip'] = com_item_loc_weights_sum
feat['com_item_time_weights_sum_no_clip'] = com_item_time_weights_sum
feat['com_item_record_weights_sum'] = com_item_record_weights_sum
feat['com_item_cnt'] = t_com_item_cnt
feat['com_item_loc_weights_mean_no_clip'] = feat['com_item_loc_weights_sum_no_clip'] / feat['com_item_cnt']
feat['com_item_time_weights_mean_no_clip'] = feat['com_item_time_weights_sum_no_clip'] / feat['com_item_cnt']
feat['com_item_record_weights_mean'] = feat['com_item_record_weights_sum'] / feat['com_item_cnt']
feat = feat[ ['com_item_loc_weights_sum_no_clip','com_item_time_weights_sum_no_clip',
'com_item_loc_weights_mean_no_clip','com_item_time_weights_mean_no_clip', ] ]
st2 = time.time()
print(st2-st1)
return feat
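# Within each user, split road items into those clicked before vs. after the query time and
# take forward / backward time differences along the road_item_loc order.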
def feat_u2i_road_item_before_and_after_query_time_diff(data):
df = data.copy()
feat = df[['user','road_item_loc','road_item_time','query_item_time']]
feat_h = feat.loc[feat['road_item_time']<feat['query_item_time']]
feat_f = feat.loc[feat['road_item_time']>feat['query_item_time']]
feat_h = feat_h.groupby(['user','road_item_loc']).first().reset_index()
feat_f = feat_f.groupby(['user','road_item_loc']).first().reset_index()
feat_h_group = feat_h.sort_values(['user','road_item_loc']).set_index(['user','road_item_loc']).groupby('user')
feat_f_group = feat_f.sort_values(['user','road_item_loc']).set_index(['user','road_item_loc']).groupby('user')
feat1 = feat_h_group['road_item_time'].diff(1)
feat2 = feat_h_group['road_item_time'].diff(-1)
feat3 = feat_f_group['road_item_time'].diff(1)
feat4 = feat_f_group['road_item_time'].diff(-1)
feat1.name = 'u2i_road_item_before_query_time_diff_history'
feat2.name = 'u2i_road_item_before_query_time_diff_future'
feat3.name = 'u2i_road_item_after_query_time_diff_history'
feat4.name = 'u2i_road_item_after_query_time_diff_future'
feat = df.merge(pd.concat([feat1,feat2,feat3,feat4],axis=1),how='left',on=['user','road_item_loc'])
cols = ['u2i_road_item_before_query_time_diff_history',
'u2i_road_item_before_query_time_diff_future',
'u2i_road_item_after_query_time_diff_history',
'u2i_road_item_after_query_time_diff_future']
feat = feat[ cols ]
return feat
def feat_i2i_cijs_topk_by_loc_new(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
for loc1, item in enumerate(items):
item_cnt[item] += 1
df = data.copy()
feat = df[ ['index','road_item','item'] ]
print('Loading i2i_sim_seq')
i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i_seq_{mode}_{cur_stage}.pkl') )
print('Finished i2i_sim_seq')
    print('Create new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
topk = 3
loc_base = 0.9
print(f'Starting {loc_base}')
result = np.zeros((len(new_keys),4))
for i in range(len(new_keys)):
key = new_keys[i]
if key not in i2i_sim_seq.keys():
result[i,:] = np.nan
continue
records = i2i_sim_seq[key]
if len(records)==0:
print(key)
for record in records:
loc1, loc2, t1, t2, record_len = record
if loc1-loc2>0:
if loc1-loc2==1:
result[i,2] += 1
result[i,0] += (t1 - t2)
time_weight = (1 - (t1 - t2) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc1-loc2-1
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
else:
if loc2-loc1==1:
result[i,3] += 1
result[i,1] += (t2 - t1)
time_weight = (1 - (t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc2-loc1-1
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
result[:,1]/=(result[:,3]+1e-5)
result[:,0]/=(result[:,2]+1e-5)
cols = ['history_loc_diff1_com_item_time_mean_new',
'future_loc_diff1_com_item_time_mean_new',
'history_loc_diff1_com_item_cnt',
'future_loc_diff1_com_item_cnt']
result = pd.DataFrame(result,columns=cols,index=new_keys)
result = result.reset_index()
result.rename(columns={'index':'new_keys'},inplace=True)
feat = feat.merge(result,how='left',on='new_keys')
print('Finished getting result')
feat = feat[ ['history_loc_diff1_com_item_time_mean_new','future_loc_diff1_com_item_time_mean_new'] ]
return feat
def feat_items_list_len(data):
df = data.copy()
feat = df[ ['index','user','left_items_list','right_items_list','stage'] ]
def func(s):
return len(s)
    tdata = feat.groupby('user').first()
    tdata['left_items_list_len'] = tdata['left_items_list'].apply( func )
    tdata['right_items_list_len'] = tdata['right_items_list'].apply( func )
    tdata = tdata[ ['left_items_list_len','right_items_list_len'] ].reset_index()
    feat = feat.merge( tdata, how='left', on='user' )
    feat = feat[ ['left_items_list_len','right_items_list_len'] ]
    return feat
def feat_item_cnt_in_stage2_mean_max_min_by_user(data):
if mode=='valid':
all_train_stage_data = utils.load_pickle(all_train_stage_data_path.format(cur_stage))
else:
all_train_stage_data = utils.load_pickle(online_all_train_stage_data_path.format(cur_stage))
item_stage_cnt = all_train_stage_data.groupby(["item_id"])["stage"].value_counts().to_dict()
feat = data[["user","item", "stage"]]
feat["head"] = feat.set_index(["item", "stage"]).index
feat["item_stage_cnt"] = feat["head"].map(item_stage_cnt)
tmp = feat.groupby('user')['item_stage_cnt'].agg(['mean','max','min'])
tmp.columns = [f'item_cnt_in_stage2_{i}_by_user' for i in tmp.columns]
feat = feat.merge(tmp,how='left',on='user')
return feat[tmp.columns]
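# Max and mean cosine similarity between the candidate item's text embedding and the
# embeddings of every item in the user's left/right history lists, computed in length-sorted
# batches (the image variant below uses the second embedding of item_feat).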
def feat_item_seq_sim_cossim_text(data):
df = data.copy()
feat = df[ ['left_items_list','right_items_list','item'] ]
item_feat = utils.load_pickle(item_feat_pkl)
item_n = 120000
item_np = np.zeros((item_n,128))
for k,v in item_feat.items():
item_np[k,:] = v[0]
all_items = np.array(sorted(item_feat.keys()))
item_np = item_np/(np.linalg.norm(item_np,axis=1,keepdims=True)+1e-9)
batch_size = 10000
n = len(feat)
batch_num = n//batch_size if n%batch_size==0 else n//batch_size+1
feat['left_len'] = feat['left_items_list'].apply(len)
feat_left = feat.sort_values('left_len')
feat_left_len = feat_left['left_len'].values
feat_left_items_list = feat_left['left_items_list'].values
feat_left_items = feat_left['item'].values
left_result = np.zeros((len(feat_left),2))
left_result_len = np.zeros(len(feat_left))
for i in range(batch_num):
cur_batch_size = len(feat_left_len[i*batch_size:(i+1)*batch_size])
max_len = feat_left_len[i*batch_size:(i+1)*batch_size].max()
max_len = max(max_len,1)
left_items = np.zeros((cur_batch_size,max_len),dtype='int32')
for j,arr in enumerate(feat_left_items_list[i*batch_size:(i+1)*batch_size]):
left_items[j][:len(arr)] = arr
left_result_len[i*batch_size:(i+1)*batch_size] = np.isin(left_items,all_items).sum(axis=1)
vec1 = item_np[left_items]
vec2 = item_np[feat_left_items[i*batch_size:(i+1)*batch_size]]
vec2 = vec2.reshape(-1,1,128)
sim = np.sum(vec1*vec2,axis=-1)
left_result[i*batch_size:(i+1)*batch_size,0] = sim.max(axis=1)
left_result[i*batch_size:(i+1)*batch_size,1] = sim.sum(axis=1)
if i % 10 == 0:
print('batch num',i)
df_left = pd.DataFrame(left_result,index=feat_left.index,columns=['left_allitem_item_textsim_max','left_allitem_item_textsim_sum'])
df_left['left_allitem_textsim_len'] = left_result_len
feat['right_len'] = feat['right_items_list'].apply(len)
feat_right = feat.sort_values('right_len')
feat_right_len = feat_right['right_len'].values
feat_right_items_list = feat_right['right_items_list'].values
feat_right_items = feat_right['item'].values
right_result = np.zeros((len(feat_right),2))
right_result_len = np.zeros(len(feat_right))
for i in range(batch_num):
cur_batch_size = len(feat_right_len[i*batch_size:(i+1)*batch_size])
max_len = feat_right_len[i*batch_size:(i+1)*batch_size].max()
max_len = max(max_len,1)
right_items = np.zeros((cur_batch_size,max_len),dtype='int32')
for j,arr in enumerate(feat_right_items_list[i*batch_size:(i+1)*batch_size]):
right_items[j][:len(arr)] = arr
right_result_len[i*batch_size:(i+1)*batch_size] = np.isin(right_items,all_items).sum(axis=1)
vec1 = item_np[right_items]
vec2 = item_np[feat_right_items[i*batch_size:(i+1)*batch_size]]
vec2 = vec2.reshape(-1,1,128)
sim = np.sum(vec1*vec2,axis=-1)
right_result[i*batch_size:(i+1)*batch_size,0] = sim.max(axis=1)
right_result[i*batch_size:(i+1)*batch_size,1] = sim.sum(axis=1)
if i % 10 == 0:
print('batch num',i)
df_right = pd.DataFrame(right_result,index=feat_right.index,columns=['right_allitem_item_textsim_max','right_allitem_item_textsim_sum'])
df_right['right_allitem_textsim_len'] = right_result_len
df_left = df_left.sort_index()
df_right = df_right.sort_index()
feat = pd.concat([df_left,df_right],axis=1)
feat['allitem_item_textsim_max'] = feat[['left_allitem_item_textsim_max','right_allitem_item_textsim_max']].max(axis=1)
feat['allitem_item_textsim_sum'] = feat[['left_allitem_item_textsim_sum','right_allitem_item_textsim_sum']].sum(axis=1)
feat['allitem_item_textsim_len'] = feat[['left_allitem_textsim_len','right_allitem_textsim_len']].sum(axis=1)
feat['allitem_item_textsim_mean'] = feat['allitem_item_textsim_sum']/(feat['allitem_item_textsim_len']+1e-9)
return feat[['allitem_item_textsim_max','allitem_item_textsim_mean']]
def feat_item_seq_sim_cossim_image(data):
df = data.copy()
feat = df[ ['left_items_list','right_items_list','item'] ]
item_feat = utils.load_pickle(item_feat_pkl)
item_n = 120000
item_np = np.zeros((item_n,128))
for k,v in item_feat.items():
item_np[k,:] = v[1]
all_items = np.array(sorted(item_feat.keys()))
item_np = item_np/(np.linalg.norm(item_np,axis=1,keepdims=True)+1e-9)
batch_size = 10000
n = len(feat)
batch_num = n//batch_size if n%batch_size==0 else n//batch_size+1
feat['left_len'] = feat['left_items_list'].apply(len)
feat_left = feat.sort_values('left_len')
feat_left_len = feat_left['left_len'].values
feat_left_items_list = feat_left['left_items_list'].values
feat_left_items = feat_left['item'].values
left_result = np.zeros((len(feat_left),2))
left_result_len = np.zeros(len(feat_left))
for i in range(batch_num):
cur_batch_size = len(feat_left_len[i*batch_size:(i+1)*batch_size])
max_len = feat_left_len[i*batch_size:(i+1)*batch_size].max()
max_len = max(max_len,1)
left_items = np.zeros((cur_batch_size,max_len),dtype='int32')
for j,arr in enumerate(feat_left_items_list[i*batch_size:(i+1)*batch_size]):
left_items[j][:len(arr)] = arr
left_result_len[i*batch_size:(i+1)*batch_size] = np.isin(left_items,all_items).sum(axis=1)
vec1 = item_np[left_items]
vec2 = item_np[feat_left_items[i*batch_size:(i+1)*batch_size]]
vec2 = vec2.reshape(-1,1,128)
sim = np.sum(vec1*vec2,axis=-1)
left_result[i*batch_size:(i+1)*batch_size,0] = sim.max(axis=1)
left_result[i*batch_size:(i+1)*batch_size,1] = sim.sum(axis=1)
if i % 10 == 0:
print('batch num',i)
df_left = pd.DataFrame(left_result,index=feat_left.index,columns=['left_allitem_item_imagesim_max','left_allitem_item_imagesim_sum'])
df_left['left_allitem_imagesim_len'] = left_result_len
feat['right_len'] = feat['right_items_list'].apply(len)
feat_right = feat.sort_values('right_len')
feat_right_len = feat_right['right_len'].values
feat_right_items_list = feat_right['right_items_list'].values
feat_right_items = feat_right['item'].values
right_result = np.zeros((len(feat_right),2))
right_result_len = np.zeros(len(feat_right))
for i in range(batch_num):
cur_batch_size = len(feat_right_len[i*batch_size:(i+1)*batch_size])
max_len = feat_right_len[i*batch_size:(i+1)*batch_size].max()
max_len = max(max_len,1)
right_items = np.zeros((cur_batch_size,max_len),dtype='int32')
for j,arr in enumerate(feat_right_items_list[i*batch_size:(i+1)*batch_size]):
right_items[j][:len(arr)] = arr
right_result_len[i*batch_size:(i+1)*batch_size] = np.isin(right_items,all_items).sum(axis=1)
vec1 = item_np[right_items]
vec2 = item_np[feat_right_items[i*batch_size:(i+1)*batch_size]]
vec2 = vec2.reshape(-1,1,128)
sim = np.sum(vec1*vec2,axis=-1)
right_result[i*batch_size:(i+1)*batch_size,0] = sim.max(axis=1)
right_result[i*batch_size:(i+1)*batch_size,1] = sim.sum(axis=1)
if i % 10 == 0:
print('batch num',i)
df_right = pd.DataFrame(right_result,index=feat_right.index,columns=['right_allitem_item_imagesim_max','right_allitem_item_imagesim_sum'])
df_right['right_allitem_imagesim_len'] = right_result_len
df_left = df_left.sort_index()
df_right = df_right.sort_index()
feat = pd.concat([df_left,df_right],axis=1)
feat['allitem_item_imagesim_max'] = feat[['left_allitem_item_imagesim_max','right_allitem_item_imagesim_max']].max(axis=1)
feat['allitem_item_imagesim_sum'] = feat[['left_allitem_item_imagesim_sum','right_allitem_item_imagesim_sum']].sum(axis=1)
feat['allitem_item_imagesim_len'] = feat[['left_allitem_imagesim_len','right_allitem_imagesim_len']].sum(axis=1)
feat['allitem_item_imagesim_mean'] = feat['allitem_item_imagesim_sum']/(feat['allitem_item_imagesim_len']+1e-9)
return feat[['allitem_item_imagesim_max','allitem_item_imagesim_mean']]
def feat_i2i_sim_on_hist_seq(data):
# get i2i similarities dict
    # Not used.
df = data.copy()
feat = df[ ['index','road_item','item'] ]
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
sim_item = {}
item_cnt = defaultdict(int)
com_item_cnt = {}
item_set = set()
item_dict_set = {}
st0 = time.time()
for user, items in user_item_dict.items():
for item in items:
item_set.add(item)
item_dict_set[item] = set()
for user, items in user_item_dict.items():
times = user_time_dict[user]
for loc1, item in enumerate(items):
item_cnt[item] += 1
sim_item.setdefault(item, {})
com_item_cnt.setdefault(item, {})
for loc2, relate_item in enumerate(items):
if item == relate_item:
continue
item_dict_set[ item ].add( relate_item )
t1 = times[loc1]
t2 = times[loc2]
sim_item[item].setdefault(relate_item, 0)
com_item_cnt[item].setdefault(relate_item, 0)
if loc1-loc2>0:
time_weight = (1 - (t1 - t2) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc1-loc2-1
loc_weight = (0.9**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
sim_item[item][relate_item] += 1 * 1.0 * loc_weight * time_weight / math.log(1 + len(items))
else:
time_weight = (1 - (t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc2-loc1-1
loc_weight = (0.9**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
sim_item[item][relate_item] += 1 * 1.0 * loc_weight * time_weight / math.log(1 + len(items))
com_item_cnt[item][relate_item] += 1.0
print("compute i2i sim end.")
max_i2i_sim_arr = np.zeros(len(data))
mean_i2i_sim_arr = np.zeros(len(data))
    # Could also add statistics over the last N clicks or the last N time units.
for i, (left_seq, right_seq, item) in enumerate(zip(data["left_items_list"].values, data["right_items_list"].values, data["item"].values)):
if i % 100000 == 0:
print("{} in length {}".format(i, len(data)))
seq_i2i_sim = []
        for h_item in left_seq + right_seq:
            # items that never appear in the training log have no i2i entry yet
            sim_item.setdefault(h_item, {})
            sim_item[h_item].setdefault(item, 0)
            seq_i2i_sim.append(sim_item[h_item][item])
        max_i2i_sim_arr[i] = max(seq_i2i_sim) if len(seq_i2i_sim) > 0 else np.nan
        mean_i2i_sim_arr[i] = sum(seq_i2i_sim) / len(seq_i2i_sim) if len(seq_i2i_sim) > 0 else np.nan
feat = data[["item"]]
feat["max_i2i_sim_arr"] = max_i2i_sim_arr
feat["mean_i2i_sim_arr"] = mean_i2i_sim_arr
return feat[[
"max_i2i_sim_arr", "mean_i2i_sim_arr"
]]
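# Re-definitions of feat_item_seq_sim_cossim_text / _image with a larger batch size and
# history truncation via len_max_nums; these later definitions override the earlier ones.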
def feat_item_seq_sim_cossim_text(data):
df = data.copy()
feat = df[ ['left_items_list','right_items_list','item'] ]
item_feat = utils.load_pickle(item_feat_pkl)
item_n = 120000
item_np = np.zeros((item_n,128))
for k,v in item_feat.items():
item_np[k,:] = v[0]
all_items = np.array(sorted(item_feat.keys()))
item_np = item_np/(np.linalg.norm(item_np,axis=1,keepdims=True)+1e-9)
batch_size = 30000
n = len(feat)
batch_num = n//batch_size if n%batch_size==0 else n//batch_size+1
feat['left_len'] = feat['left_items_list'].apply(len)
feat_left = feat.sort_values('left_len')
feat_left_len = feat_left['left_len'].values
feat_left_items_list = feat_left['left_items_list'].values
feat_left_items = feat_left['item'].values
left_result = np.zeros((len(feat_left),2))
left_result_len = np.zeros(len(feat_left))
len_max_nums = 300
for i in range(batch_num):
cur_batch_size = len(feat_left_len[i*batch_size:(i+1)*batch_size])
max_len = feat_left_len[i*batch_size:(i+1)*batch_size].max()
max_len = min(max(max_len,1),len_max_nums)
left_items = np.zeros((cur_batch_size,max_len),dtype='int32')
for j,arr in enumerate(feat_left_items_list[i*batch_size:(i+1)*batch_size]):
arr = arr[:len_max_nums]
left_items[j][:len(arr)] = arr
left_result_len[i*batch_size:(i+1)*batch_size] = np.isin(left_items,all_items).sum(axis=1)
vec1 = item_np[left_items]
vec2 = item_np[feat_left_items[i*batch_size:(i+1)*batch_size]]
vec2 = vec2.reshape(-1,1,128)
sim = np.sum(vec1*vec2,axis=-1)
left_result[i*batch_size:(i+1)*batch_size,0] = sim.max(axis=1)
left_result[i*batch_size:(i+1)*batch_size,1] = sim.sum(axis=1)
if i % 10 == 0:
print('batch num',i)
df_left = pd.DataFrame(left_result,index=feat_left.index,columns=['left_allitem_item_textsim_max','left_allitem_item_textsim_sum'])
df_left['left_allitem_textsim_len'] = left_result_len
feat['right_len'] = feat['right_items_list'].apply(len)
feat_right = feat.sort_values('right_len')
feat_right_len = feat_right['right_len'].values
feat_right_items_list = feat_right['right_items_list'].values
feat_right_items = feat_right['item'].values
right_result = np.zeros((len(feat_right),2))
right_result_len = np.zeros(len(feat_right))
len_max_nums = 80
for i in range(batch_num):
cur_batch_size = len(feat_right_len[i*batch_size:(i+1)*batch_size])
max_len = feat_right_len[i*batch_size:(i+1)*batch_size].max()
max_len = min(max(max_len,1),len_max_nums)
right_items = np.zeros((cur_batch_size,max_len),dtype='int32')
for j,arr in enumerate(feat_right_items_list[i*batch_size:(i+1)*batch_size]):
arr = arr[:len_max_nums]
right_items[j][:len(arr)] = arr
right_result_len[i*batch_size:(i+1)*batch_size] = np.isin(right_items,all_items).sum(axis=1)
vec1 = item_np[right_items]
vec2 = item_np[feat_right_items[i*batch_size:(i+1)*batch_size]]
vec2 = vec2.reshape(-1,1,128)
sim = np.sum(vec1*vec2,axis=-1)
right_result[i*batch_size:(i+1)*batch_size,0] = sim.max(axis=1)
right_result[i*batch_size:(i+1)*batch_size,1] = sim.sum(axis=1)
if i % 10 == 0:
print('batch num',i)
df_right = pd.DataFrame(right_result,index=feat_right.index,columns=['right_allitem_item_textsim_max','right_allitem_item_textsim_sum'])
df_right['right_allitem_textsim_len'] = right_result_len
df_left = df_left.sort_index()
df_right = df_right.sort_index()
feat = pd.concat([df_left,df_right],axis=1)
feat['allitem_item_textsim_max'] = feat[['left_allitem_item_textsim_max','right_allitem_item_textsim_max']].max(axis=1)
feat['allitem_item_textsim_sum'] = feat[['left_allitem_item_textsim_sum','right_allitem_item_textsim_sum']].sum(axis=1)
feat['allitem_item_textsim_len'] = feat[['left_allitem_textsim_len','right_allitem_textsim_len']].sum(axis=1)
feat['allitem_item_textsim_mean'] = feat['allitem_item_textsim_sum']/(feat['allitem_item_textsim_len']+1e-9)
return feat[['allitem_item_textsim_max','allitem_item_textsim_mean']]
def feat_item_seq_sim_cossim_image(data):
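# Same as feat_item_seq_sim_cossim_text, but using the second 128-d vector in item_feat_pkl
# (presumably the image embedding); returns the max and mean image-embedding cosine similarity
# between the candidate item and the items in the user's click history.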
df = data.copy()
feat = df[ ['left_items_list','right_items_list','item'] ]
item_feat = utils.load_pickle(item_feat_pkl)
item_n = 120000
item_np = np.zeros((item_n,128))
for k,v in item_feat.items():
item_np[k,:] = v[1]
all_items = np.array(sorted(item_feat.keys()))
item_np = item_np/(np.linalg.norm(item_np,axis=1,keepdims=True)+1e-9)
batch_size = 30000
n = len(feat)
batch_num = n//batch_size if n%batch_size==0 else n//batch_size+1
feat['left_len'] = feat['left_items_list'].apply(len)
feat_left = feat.sort_values('left_len')
feat_left_len = feat_left['left_len'].values
feat_left_items_list = feat_left['left_items_list'].values
feat_left_items = feat_left['item'].values
left_result = np.zeros((len(feat_left),2))
left_result_len = np.zeros(len(feat_left))
len_max_nums = 300
for i in range(batch_num):
cur_batch_size = len(feat_left_len[i*batch_size:(i+1)*batch_size])
max_len = feat_left_len[i*batch_size:(i+1)*batch_size].max()
max_len = min(max(max_len,1),len_max_nums)
left_items = np.zeros((cur_batch_size,max_len),dtype='int32')
for j,arr in enumerate(feat_left_items_list[i*batch_size:(i+1)*batch_size]):
arr = arr[:len_max_nums]
left_items[j][:len(arr)] = arr
left_result_len[i*batch_size:(i+1)*batch_size] = np.isin(left_items,all_items).sum(axis=1)
vec1 = item_np[left_items]
vec2 = item_np[feat_left_items[i*batch_size:(i+1)*batch_size]]
vec2 = vec2.reshape(-1,1,128)
sim = np.sum(vec1*vec2,axis=-1)
left_result[i*batch_size:(i+1)*batch_size,0] = sim.max(axis=1)
left_result[i*batch_size:(i+1)*batch_size,1] = sim.sum(axis=1)
if i % 10 == 0:
print('batch num',i)
df_left = pd.DataFrame(left_result,index=feat_left.index,columns=['left_allitem_item_imagesim_max','left_allitem_item_imagesim_sum'])
df_left['left_allitem_imagesim_len'] = left_result_len
feat['right_len'] = feat['right_items_list'].apply(len)
feat_right = feat.sort_values('right_len')
feat_right_len = feat_right['right_len'].values
feat_right_items_list = feat_right['right_items_list'].values
feat_right_items = feat_right['item'].values
right_result = np.zeros((len(feat_right),2))
right_result_len = np.zeros(len(feat_right))
len_max_nums = 80
for i in range(batch_num):
cur_batch_size = len(feat_right_len[i*batch_size:(i+1)*batch_size])
max_len = feat_right_len[i*batch_size:(i+1)*batch_size].max()
max_len = min(max(max_len,1),len_max_nums)
right_items = np.zeros((cur_batch_size,max_len),dtype='int32')
for j,arr in enumerate(feat_right_items_list[i*batch_size:(i+1)*batch_size]):
arr = arr[:len_max_nums]
right_items[j][:len(arr)] = arr
right_result_len[i*batch_size:(i+1)*batch_size] = np.isin(right_items,all_items).sum(axis=1)
vec1 = item_np[right_items]
vec2 = item_np[feat_right_items[i*batch_size:(i+1)*batch_size]]
vec2 = vec2.reshape(-1,1,128)
sim = np.sum(vec1*vec2,axis=-1)
right_result[i*batch_size:(i+1)*batch_size,0] = sim.max(axis=1)
right_result[i*batch_size:(i+1)*batch_size,1] = sim.sum(axis=1)
if i % 10 == 0:
print('batch num',i)
df_right = pd.DataFrame(right_result,index=feat_right.index,columns=['right_allitem_item_imagesim_max','right_allitem_item_imagesim_sum'])
df_right['right_allitem_imagesim_len'] = right_result_len
df_left = df_left.sort_index()
df_right = df_right.sort_index()
feat = pd.concat([df_left,df_right],axis=1)
feat['allitem_item_imagesim_max'] = feat[['left_allitem_item_imagesim_max','right_allitem_item_imagesim_max']].max(axis=1)
feat['allitem_item_imagesim_sum'] = feat[['left_allitem_item_imagesim_sum','right_allitem_item_imagesim_sum']].sum(axis=1)
feat['allitem_item_imagesim_len'] = feat[['left_allitem_imagesim_len','right_allitem_imagesim_len']].sum(axis=1)
feat['allitem_item_imagesim_mean'] = feat['allitem_item_imagesim_sum']/(feat['allitem_item_imagesim_len']+1e-9)
return feat[['allitem_item_imagesim_max','allitem_item_imagesim_mean']]
def feat_i2i_sim_on_hist_seq(data):
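# Builds an item-to-item co-occurrence similarity table from the training click sequences:
# every ordered pair of co-clicked items contributes loc_weight * time_weight / log(1 + len(items)),
# with time_weight = max(1 - 100 * |t1 - t2|, 0.2) and loc_weight = max(0.9 ** (|loc1 - loc2| - 1), 0.2).
# For each sample it then looks up the similarity between the candidate item and every item in the
# truncated left+right click history and returns the max and the mean.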
# get i2i similarities dict
# not used
df = data.copy()
feat = df[ ['index','road_item','item'] ]
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
sim_item = {}
item_cnt = defaultdict(int)
com_item_cnt = {}
item_set = set()
item_dict_set = {}
st0 = time.time()
for user, items in user_item_dict.items():
for item in items:
item_set.add(item)
item_dict_set[item] = set()
for user, items in user_item_dict.items():
times = user_time_dict[user]
for loc1, item in enumerate(items):
item_cnt[item] += 1
sim_item.setdefault(item, {})
com_item_cnt.setdefault(item, {})
for loc2, relate_item in enumerate(items):
if item == relate_item:
continue
item_dict_set[ item ].add( relate_item )
t1 = times[loc1]
t2 = times[loc2]
sim_item[item].setdefault(relate_item, 0)
com_item_cnt[item].setdefault(relate_item, 0)
if loc1-loc2>0:
time_weight = (1 - (t1 - t2) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc1-loc2-1
loc_weight = (0.9**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
sim_item[item][relate_item] += 1 * 1.0 * loc_weight * time_weight / math.log(1 + len(items))
else:
time_weight = (1 - (t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc2-loc1-1
loc_weight = (0.9**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
sim_item[item][relate_item] += 1 * 1.0 * loc_weight * time_weight / math.log(1 + len(items))
com_item_cnt[item][relate_item] += 1.0
print("compute i2i sim end.")
max_i2i_sim_arr = np.zeros(len(data))
mean_i2i_sim_arr = np.zeros(len(data))
left_len_max_nums = 300
right_len_max_nums = 80
# could also add statistics over the past N clicks, or within the past N time window
for i, (left_seq, right_seq, item) in enumerate(zip(data["left_items_list"].values, data["right_items_list"].values, data["item"].values)):
if i % 100000 == 0:
print("{} in length {}".format(i, len(data)))
seq_i2i_sim = []
left_seq = left_seq[:left_len_max_nums]
right_seq = right_seq[:right_len_max_nums]
left_right_seq = left_seq+right_seq
for h_item in left_right_seq:
sim_item[h_item].setdefault(item, 0)
seq_i2i_sim.append(sim_item[h_item][item])
max_i2i_sim_arr[i] = max(seq_i2i_sim) if len(left_right_seq) > 0 else np.nan
mean_i2i_sim_arr[i] = sum(seq_i2i_sim) / len(left_right_seq) if len(left_right_seq) > 0 else np.nan
feat = data[["item"]]
feat["max_i2i_sim_arr"] = max_i2i_sim_arr
feat["mean_i2i_sim_arr"] = mean_i2i_sim_arr
return feat[[
"max_i2i_sim_arr", "mean_i2i_sim_arr"
]]
def feat_item_max_sim_weight_loc_weight_time_weight_rank_weight(data):
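# For every (user, item) pair, the maximum of the four recall weights
# (sim_weight, loc_weight, time_weight, rank_weight) over all recall roads that produced the pair.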
df = data.copy()
df = df[ ['user','item','sim_weight','loc_weight','time_weight','rank_weight','index'] ]
feat = df[ ['index','user','item'] ]
df = df.groupby( ['user','item'] )[ ['sim_weight','loc_weight','time_weight','rank_weight'] ].agg( ['max'] ).reset_index()
cols = [ f'item_{j}_{i}' for i in ['sim_weight','loc_weight','time_weight','rank_weight'] for j in ['max'] ]
df.columns = [ 'user','item' ]+ cols
feat = pd.merge( feat, df, on=['user','item'], how='left')
feat = feat[ cols ]
return feat
def feat_different_type_road_score_max(data):
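# Splits sim_weight into per-recall-type scores (recall_type 0 = i2i, 1 = blend, 2 = i2i2i),
# masking the other types with NaN, and takes the per-(user, item) max of each score.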
df = data.copy()
feat = df[ ['user','item','index','sim_weight','recall_type'] ]
feat['i2i_score'] = feat['sim_weight']
feat['blend_score'] = feat['sim_weight']
feat['i2i2i_score'] = feat['sim_weight']
feat.loc[ feat['recall_type']!=0 , 'i2i_score'] = np.nan
feat.loc[ feat['recall_type']!=1 , 'blend_score'] = np.nan
feat.loc[ feat['recall_type']!=2 , 'i2i2i_score'] = np.nan
df = feat[ ['index','user','item','i2i_score','blend_score','i2i2i_score'] ]
df = df.groupby( ['user','item'] )[ ['i2i_score','blend_score','i2i2i_score'] ].agg( ['max'] ).reset_index()
df.columns = ['user','item'] + [ f'{i}_{j}' for i in ['i2i_score','blend_score','i2i2i_score'] for j in ['max'] ]
feat = pd.merge( feat, df, on=['user','item'], how='left')
feat = feat[ ['i2i_score_max','blend_score_max','i2i2i_score_max',] ]
return feat
def feat_different_type_road_score_max_by_item(data):
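# Same per-recall-type split of sim_weight as above, but the max is aggregated per item only.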
df = data.copy()
feat = df[ ['item','index','sim_weight','recall_type'] ]
cols = ['i2i_score','blend_score','i2i2i_score']#,'i2iw10_score','i2i2b_score']
for i in range(len(cols)):
feat[cols[i]] = feat['sim_weight']
feat.loc[ feat['recall_type']!=i,cols[i] ] = np.nan
df = feat[ ['index','item','i2i_score','blend_score','i2i2i_score'] ]
df = df.groupby( ['item'] )[ ['i2i_score','blend_score','i2i2i_score'] ].agg( ['max'] ).reset_index()
df.columns = ['item'] + [ f'{i}_{j}_by_item' for i in ['i2i_score','blend_score','i2i2i_score'] for j in ['max'] ]
feat = pd.merge( feat, df, on=['item'], how='left')
feat = feat[ ['i2i_score_max_by_item','blend_score_max_by_item','i2i2i_score_max_by_item',] ]
return feat
def feat_item_sum_mean_max_i2i2i_weight(data):
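# Loads the pre-computed i2i2i similarity sequences keyed by (road_item, item), sums
# score1_1 * score1_2 over the intermediate items for each key, and then aggregates that
# sum per (user, item) with sum / mean / max.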
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
df = data.copy()
feat = df[ ['index','road_item','item','user'] ]
print('Loading i2i2i_sim_seq')
i2i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i2i_seq_{mode}_{cur_stage}.pkl') )
print('Finished i2i2i_sim_seq')
print('Create new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
result = np.zeros(len(new_keys))
item_cnt = df_train['item_id'].value_counts().to_dict()
for i in range(len(new_keys)):
key = new_keys[i]
if key not in i2i2i_sim_seq.keys():
result[i] = np.nan
continue
records = i2i2i_sim_seq[key]
if len(records)==0:
print(key)
for record in records:
item,score1_1,score1_2,score2_1,score2_2 = record
result[i] += score1_1*score1_2
print('Finished getting result')
result = pd.DataFrame(result,index=new_keys,columns=['i2i2i_score1_sum'])
result = result.reset_index()
result.rename(columns={'index':'new_keys'},inplace=True)
feat = feat.merge(result,how='left',on='new_keys')
tmp = feat.groupby( ['user','item'] )[ ['i2i2i_score1_sum'] ].agg( ['sum','mean','max'] ).reset_index()
cols = [ f'item_{j}_{i}' for i in ['i2i2i_weight'] for j in ['sum','mean','max'] ]
tmp.columns = [ 'user','item' ]+ cols
feat = pd.merge( feat, tmp, on=['user','item'], how='left')
feat = feat[ cols ]
return feat
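# ---------------------------------------------------------------------------
# The code below is a unit-test suite for pandas Index classes
# (Index, Float64Index, Int64Index and the shared numeric-index base tests).
# ---------------------------------------------------------------------------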
# pylint: disable=E1101,E1103,W0232
from datetime import datetime, timedelta
from pandas.compat import range, lrange, lzip, u, zip
import operator
import re
import nose
import warnings
import os
import numpy as np
from numpy.testing import assert_array_equal
from pandas import period_range, date_range
from pandas.core.index import (Index, Float64Index, Int64Index, MultiIndex,
InvalidIndexError, NumericIndex)
from pandas.tseries.index import DatetimeIndex
from pandas.tseries.tdi import TimedeltaIndex
from pandas.tseries.period import PeriodIndex
from pandas.core.series import Series
from pandas.util.testing import (assert_almost_equal, assertRaisesRegexp,
assert_copy)
from pandas import compat
from pandas.compat import long
import pandas.util.testing as tm
import pandas.core.config as cf
from pandas.tseries.index import _to_m8
import pandas.tseries.offsets as offsets
import pandas as pd
from pandas.lib import Timestamp
class Base(object):
""" base class for index sub-class tests """
_holder = None
_compat_props = ['shape', 'ndim', 'size', 'itemsize', 'nbytes']
def verify_pickle(self,index):
unpickled = self.round_trip_pickle(index)
self.assertTrue(index.equals(unpickled))
def test_pickle_compat_construction(self):
# this is testing for pickle compat
if self._holder is None:
return
# need an object to create with
self.assertRaises(TypeError, self._holder)
def test_numeric_compat(self):
idx = self.create_index()
tm.assertRaisesRegexp(TypeError,
"cannot perform __mul__",
lambda : idx * 1)
tm.assertRaisesRegexp(TypeError,
"cannot perform __mul__",
lambda : 1 * idx)
div_err = "cannot perform __truediv__" if compat.PY3 else "cannot perform __div__"
tm.assertRaisesRegexp(TypeError,
div_err,
lambda : idx / 1)
tm.assertRaisesRegexp(TypeError,
div_err,
lambda : 1 / idx)
tm.assertRaisesRegexp(TypeError,
"cannot perform __floordiv__",
lambda : idx // 1)
tm.assertRaisesRegexp(TypeError,
"cannot perform __floordiv__",
lambda : 1 // idx)
def test_boolean_context_compat(self):
# boolean context compat
idx = self.create_index()
def f():
if idx:
pass
tm.assertRaisesRegexp(ValueError,'The truth value of a',f)
def test_ndarray_compat_properties(self):
idx = self.create_index()
self.assertTrue(idx.T.equals(idx))
self.assertTrue(idx.transpose().equals(idx))
values = idx.values
for prop in self._compat_props:
self.assertEqual(getattr(idx, prop), getattr(values, prop))
# test for validity
idx.nbytes
idx.values.nbytes
class TestIndex(Base, tm.TestCase):
_holder = Index
_multiprocess_can_split_ = True
def setUp(self):
self.indices = dict(
unicodeIndex = tm.makeUnicodeIndex(100),
strIndex = tm.makeStringIndex(100),
dateIndex = tm.makeDateIndex(100),
intIndex = tm.makeIntIndex(100),
floatIndex = tm.makeFloatIndex(100),
boolIndex = Index([True,False]),
empty = Index([]),
tuples = MultiIndex.from_tuples(lzip(['foo', 'bar', 'baz'],
[1, 2, 3]))
)
for name, ind in self.indices.items():
setattr(self, name, ind)
def create_index(self):
return Index(list('abcde'))
def test_wrong_number_names(self):
def testit(ind):
ind.names = ["apple", "banana", "carrot"]
for ind in self.indices.values():
assertRaisesRegexp(ValueError, "^Length", testit, ind)
def test_set_name_methods(self):
new_name = "This is the new name for this index"
indices = (self.dateIndex, self.intIndex, self.unicodeIndex,
self.empty)
for ind in indices:
original_name = ind.name
new_ind = ind.set_names([new_name])
self.assertEqual(new_ind.name, new_name)
self.assertEqual(ind.name, original_name)
res = ind.rename(new_name, inplace=True)
# should return None
self.assertIsNone(res)
self.assertEqual(ind.name, new_name)
self.assertEqual(ind.names, [new_name])
#with assertRaisesRegexp(TypeError, "list-like"):
# # should still fail even if it would be the right length
# ind.set_names("a")
with assertRaisesRegexp(ValueError, "Level must be None"):
ind.set_names("a", level=0)
# rename in place just leaves tuples and other containers alone
name = ('A', 'B')
ind = self.intIndex
ind.rename(name, inplace=True)
self.assertEqual(ind.name, name)
self.assertEqual(ind.names, [name])
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.strIndex).__name__):
hash(self.strIndex)
def test_new_axis(self):
new_index = self.dateIndex[None, :]
self.assertEqual(new_index.ndim, 2)
tm.assert_isinstance(new_index, np.ndarray)
def test_copy_and_deepcopy(self):
from copy import copy, deepcopy
for func in (copy, deepcopy):
idx_copy = func(self.strIndex)
self.assertIsNot(idx_copy, self.strIndex)
self.assertTrue(idx_copy.equals(self.strIndex))
new_copy = self.strIndex.copy(deep=True, name="banana")
self.assertEqual(new_copy.name, "banana")
new_copy2 = self.intIndex.copy(dtype=int)
self.assertEqual(new_copy2.dtype.kind, 'i')
def test_duplicates(self):
idx = Index([0, 0, 0])
self.assertFalse(idx.is_unique)
def test_sort(self):
self.assertRaises(TypeError, self.strIndex.sort)
def test_mutability(self):
self.assertRaises(TypeError, self.strIndex.__setitem__, 0, 'foo')
def test_constructor(self):
# regular instance creation
tm.assert_contains_all(self.strIndex, self.strIndex)
tm.assert_contains_all(self.dateIndex, self.dateIndex)
# casting
arr = np.array(self.strIndex)
index = Index(arr)
tm.assert_contains_all(arr, index)
self.assert_numpy_array_equal(self.strIndex, index)
# copy
arr = np.array(self.strIndex)
index = Index(arr, copy=True, name='name')
tm.assert_isinstance(index, Index)
self.assertEqual(index.name, 'name')
assert_array_equal(arr, index)
arr[0] = "SOMEBIGLONGSTRING"
self.assertNotEqual(index[0], "SOMEBIGLONGSTRING")
# what to do here?
# arr = np.array(5.)
# self.assertRaises(Exception, arr.view, Index)
def test_constructor_corner(self):
# corner case
self.assertRaises(TypeError, Index, 0)
def test_constructor_from_series(self):
expected = DatetimeIndex([Timestamp('20110101'),Timestamp('20120101'),Timestamp('20130101')])
s = Series([Timestamp('20110101'),Timestamp('20120101'),Timestamp('20130101')])
result = Index(s)
self.assertTrue(result.equals(expected))
result = DatetimeIndex(s)
self.assertTrue(result.equals(expected))
# GH 6273
# create from a series, passing a freq
s = Series(pd.to_datetime(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']))
result = DatetimeIndex(s, freq='MS')
expected = DatetimeIndex(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990'],freq='MS')
self.assertTrue(result.equals(expected))
df = pd.DataFrame(np.random.rand(5,3))
df['date'] = ['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']
result = DatetimeIndex(df['date'], freq='MS')
# GH 6274
# infer freq of same
result = pd.infer_freq(df['date'])
self.assertEqual(result,'MS')
def test_constructor_ndarray_like(self):
# GH 5460#issuecomment-44474502
# it should be possible to convert any object that satisfies the numpy
# ndarray interface directly into an Index
class ArrayLike(object):
def __init__(self, array):
self.array = array
def __array__(self, dtype=None):
return self.array
for array in [np.arange(5),
np.array(['a', 'b', 'c']),
date_range('2000-01-01', periods=3).values]:
expected = pd.Index(array)
result = pd.Index(ArrayLike(array))
self.assertTrue(result.equals(expected))
def test_index_ctor_infer_periodindex(self):
xp = period_range('2012-1-1', freq='M', periods=3)
rs = Index(xp)
assert_array_equal(rs, xp)
tm.assert_isinstance(rs, PeriodIndex)
def test_constructor_simple_new(self):
idx = Index([1, 2, 3, 4, 5], name='int')
result = idx._simple_new(idx, 'int')
self.assertTrue(result.equals(idx))
idx = Index([1.1, np.nan, 2.2, 3.0], name='float')
result = idx._simple_new(idx, 'float')
self.assertTrue(result.equals(idx))
idx = Index(['A', 'B', 'C', np.nan], name='obj')
result = idx._simple_new(idx, 'obj')
self.assertTrue(result.equals(idx))
def test_copy(self):
i = Index([], name='Foo')
i_copy = i.copy()
self.assertEqual(i_copy.name, 'Foo')
def test_view(self):
i = Index([], name='Foo')
i_view = i.view()
self.assertEqual(i_view.name, 'Foo')
def test_legacy_pickle_identity(self):
# GH 8431
pth = tm.get_data_path()
s1 = pd.read_pickle(os.path.join(pth,'s1-0.12.0.pickle'))
s2 = pd.read_pickle(os.path.join(pth,'s2-0.12.0.pickle'))
self.assertFalse(s1.index.identical(s2.index))
self.assertFalse(s1.index.equals(s2.index))
def test_astype(self):
casted = self.intIndex.astype('i8')
# it works!
casted.get_loc(5)
# pass on name
self.intIndex.name = 'foobar'
casted = self.intIndex.astype('i8')
self.assertEqual(casted.name, 'foobar')
def test_compat(self):
self.strIndex.tolist()
def test_equals(self):
# same
self.assertTrue(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'c'])))
# different length
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b'])))
# same length, different values
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'd'])))
# Must also be an Index
self.assertFalse(Index(['a', 'b', 'c']).equals(['a', 'b', 'c']))
def test_insert(self):
# GH 7256
# validate neg/pos inserts
result = Index(['b', 'c', 'd'])
#test 0th element
self.assertTrue(Index(['a', 'b', 'c', 'd']).equals(
result.insert(0, 'a')))
#test Nth element that follows Python list behavior
self.assertTrue(Index(['b', 'c', 'e', 'd']).equals(
result.insert(-1, 'e')))
#test loc +/- neq (0, -1)
self.assertTrue(result.insert(1, 'z').equals(
result.insert(-2, 'z')))
#test empty
null_index = Index([])
self.assertTrue(Index(['a']).equals(
null_index.insert(0, 'a')))
def test_delete(self):
idx = Index(['a', 'b', 'c', 'd'], name='idx')
expected = Index(['b', 'c', 'd'], name='idx')
result = idx.delete(0)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
expected = Index(['a', 'b', 'c'], name='idx')
result = idx.delete(-1)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
with tm.assertRaises((IndexError, ValueError)):
# either, depending on the numpy version
result = idx.delete(5)
def test_identical(self):
# index
i1 = Index(['a', 'b', 'c'])
i2 = Index(['a', 'b', 'c'])
self.assertTrue(i1.identical(i2))
i1 = i1.rename('foo')
self.assertTrue(i1.equals(i2))
self.assertFalse(i1.identical(i2))
i2 = i2.rename('foo')
self.assertTrue(i1.identical(i2))
i3 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')])
i4 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')], tupleize_cols=False)
self.assertFalse(i3.identical(i4))
def test_is_(self):
ind = Index(range(10))
self.assertTrue(ind.is_(ind))
self.assertTrue(ind.is_(ind.view().view().view().view()))
self.assertFalse(ind.is_(Index(range(10))))
self.assertFalse(ind.is_(ind.copy()))
self.assertFalse(ind.is_(ind.copy(deep=False)))
self.assertFalse(ind.is_(ind[:]))
self.assertFalse(ind.is_(ind.view(np.ndarray).view(Index)))
self.assertFalse(ind.is_(np.array(range(10))))
# quasi-implementation dependent
self.assertTrue(ind.is_(ind.view()))
ind2 = ind.view()
ind2.name = 'bob'
self.assertTrue(ind.is_(ind2))
self.assertTrue(ind2.is_(ind))
# doesn't matter if Indices are *actually* views of underlying data,
self.assertFalse(ind.is_(Index(ind.values)))
arr = np.array(range(1, 11))
ind1 = Index(arr, copy=False)
ind2 = Index(arr, copy=False)
self.assertFalse(ind1.is_(ind2))
def test_asof(self):
d = self.dateIndex[0]
self.assertIs(self.dateIndex.asof(d), d)
self.assertTrue(np.isnan(self.dateIndex.asof(d - timedelta(1))))
d = self.dateIndex[-1]
self.assertEqual(self.dateIndex.asof(d + timedelta(1)), d)
d = self.dateIndex[0].to_datetime()
tm.assert_isinstance(self.dateIndex.asof(d), Timestamp)
def test_asof_datetime_partial(self):
idx = pd.date_range('2010-01-01', periods=2, freq='m')
expected = Timestamp('2010-01-31')
result = idx.asof('2010-02')
self.assertEqual(result, expected)
def test_nanosecond_index_access(self):
s = Series([Timestamp('20130101')]).values.view('i8')[0]
r = DatetimeIndex([s + 50 + i for i in range(100)])
x = Series(np.random.randn(100), index=r)
first_value = x.asof(x.index[0])
# this does not yet work, as parsing strings is done via dateutil
#self.assertEqual(first_value, x['2013-01-01 00:00:00.000000050+0000'])
self.assertEqual(first_value, x[Timestamp(np.datetime64('2013-01-01 00:00:00.000000050+0000', 'ns'))])
def test_argsort(self):
result = self.strIndex.argsort()
expected = np.array(self.strIndex).argsort()
self.assert_numpy_array_equal(result, expected)
def test_comparators(self):
index = self.dateIndex
element = index[len(index) // 2]
element = _to_m8(element)
arr = np.array(index)
def _check(op):
arr_result = op(arr, element)
index_result = op(index, element)
self.assertIsInstance(index_result, np.ndarray)
self.assert_numpy_array_equal(arr_result, index_result)
_check(operator.eq)
_check(operator.ne)
_check(operator.gt)
_check(operator.lt)
_check(operator.ge)
_check(operator.le)
def test_booleanindex(self):
boolIdx = np.repeat(True, len(self.strIndex)).astype(bool)
boolIdx[5:30:2] = False
subIndex = self.strIndex[boolIdx]
for i, val in enumerate(subIndex):
self.assertEqual(subIndex.get_loc(val), i)
subIndex = self.strIndex[list(boolIdx)]
for i, val in enumerate(subIndex):
self.assertEqual(subIndex.get_loc(val), i)
def test_fancy(self):
sl = self.strIndex[[1, 2, 3]]
for i in sl:
self.assertEqual(i, sl[sl.get_loc(i)])
def test_empty_fancy(self):
empty_farr = np.array([], dtype=np.float_)
empty_iarr = np.array([], dtype=np.int_)
empty_barr = np.array([], dtype=np.bool_)
# pd.DatetimeIndex is excluded, because it overrides getitem and should
# be tested separately.
for idx in [self.strIndex, self.intIndex, self.floatIndex]:
empty_idx = idx.__class__([])
values = idx.values
self.assertTrue(idx[[]].identical(empty_idx))
self.assertTrue(idx[empty_iarr].identical(empty_idx))
self.assertTrue(idx[empty_barr].identical(empty_idx))
# np.ndarray only accepts ndarray of int & bool dtypes, so should
# Index.
self.assertRaises(IndexError, idx.__getitem__, empty_farr)
def test_getitem(self):
arr = np.array(self.dateIndex)
exp = self.dateIndex[5]
exp = _to_m8(exp)
self.assertEqual(exp, arr[5])
def test_shift(self):
shifted = self.dateIndex.shift(0, timedelta(1))
self.assertIs(shifted, self.dateIndex)
shifted = self.dateIndex.shift(5, timedelta(1))
self.assert_numpy_array_equal(shifted, self.dateIndex + timedelta(5))
shifted = self.dateIndex.shift(1, 'B')
self.assert_numpy_array_equal(shifted, self.dateIndex + offsets.BDay())
shifted.name = 'shifted'
self.assertEqual(shifted.name, shifted.shift(1, 'D').name)
def test_intersection(self):
first = self.strIndex[:20]
second = self.strIndex[:10]
intersect = first.intersection(second)
self.assertTrue(tm.equalContents(intersect, second))
# Corner cases
inter = first.intersection(first)
self.assertIs(inter, first)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.intersection, 0.5)
idx1 = Index([1, 2, 3, 4, 5], name='idx')
# if target has the same name, it is preserved
idx2 = Index([3, 4, 5, 6, 7], name='idx')
expected2 = Index([3, 4, 5], name='idx')
result2 = idx1.intersection(idx2)
self.assertTrue(result2.equals(expected2))
self.assertEqual(result2.name, expected2.name)
# if target name is different, it will be reset
idx3 = Index([3, 4, 5, 6, 7], name='other')
expected3 = Index([3, 4, 5], name=None)
result3 = idx1.intersection(idx3)
self.assertTrue(result3.equals(expected3))
self.assertEqual(result3.name, expected3.name)
# non monotonic
idx1 = Index([5, 3, 2, 4, 1], name='idx')
idx2 = Index([4, 7, 6, 5, 3], name='idx')
result2 = idx1.intersection(idx2)
self.assertTrue(tm.equalContents(result2, expected2))
self.assertEqual(result2.name, expected2.name)
idx3 = Index([4, 7, 6, 5, 3], name='other')
result3 = idx1.intersection(idx3)
self.assertTrue(tm.equalContents(result3, expected3))
self.assertEqual(result3.name, expected3.name)
# non-monotonic non-unique
idx1 = Index(['A','B','A','C'])
idx2 = Index(['B','D'])
expected = Index(['B'], dtype='object')
result = idx1.intersection(idx2)
self.assertTrue(result.equals(expected))
def test_union(self):
first = self.strIndex[5:20]
second = self.strIndex[:10]
everything = self.strIndex[:20]
union = first.union(second)
self.assertTrue(tm.equalContents(union, everything))
# Corner cases
union = first.union(first)
self.assertIs(union, first)
union = first.union([])
self.assertIs(union, first)
union = Index([]).union(first)
self.assertIs(union, first)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.union, 0.5)
# preserve names
first.name = 'A'
second.name = 'A'
union = first.union(second)
self.assertEqual(union.name, 'A')
second.name = 'B'
union = first.union(second)
self.assertIsNone(union.name)
def test_add(self):
# - API change GH 8226
with tm.assert_produces_warning():
self.strIndex + self.strIndex
firstCat = self.strIndex.union(self.dateIndex)
secondCat = self.strIndex.union(self.strIndex)
if self.dateIndex.dtype == np.object_:
appended = np.append(self.strIndex, self.dateIndex)
else:
appended = np.append(self.strIndex, self.dateIndex.astype('O'))
self.assertTrue(tm.equalContents(firstCat, appended))
self.assertTrue(tm.equalContents(secondCat, self.strIndex))
tm.assert_contains_all(self.strIndex, firstCat)
tm.assert_contains_all(self.strIndex, secondCat)
tm.assert_contains_all(self.dateIndex, firstCat)
def test_append_multiple(self):
index = Index(['a', 'b', 'c', 'd', 'e', 'f'])
foos = [index[:2], index[2:4], index[4:]]
result = foos[0].append(foos[1:])
self.assertTrue(result.equals(index))
# empty
result = index.append([])
self.assertTrue(result.equals(index))
def test_append_empty_preserve_name(self):
left = Index([], name='foo')
right = Index([1, 2, 3], name='foo')
result = left.append(right)
self.assertEqual(result.name, 'foo')
left = Index([], name='foo')
right = Index([1, 2, 3], name='bar')
result = left.append(right)
self.assertIsNone(result.name)
def test_add_string(self):
# from bug report
index = Index(['a', 'b', 'c'])
index2 = index + 'foo'
self.assertNotIn('a', index2)
self.assertIn('afoo', index2)
def test_iadd_string(self):
index = pd.Index(['a', 'b', 'c'])
# doesn't fail test unless there is a check before `+=`
self.assertIn('a', index)
index += '_x'
self.assertIn('a_x', index)
def test_difference(self):
first = self.strIndex[5:20]
second = self.strIndex[:10]
answer = self.strIndex[10:20]
first.name = 'name'
# different names
result = first.difference(second)
self.assertTrue(tm.equalContents(result, answer))
self.assertEqual(result.name, None)
# same names
second.name = 'name'
result = first.difference(second)
self.assertEqual(result.name, 'name')
# with empty
result = first.difference([])
self.assertTrue(tm.equalContents(result, first))
self.assertEqual(result.name, first.name)
# with everything
result = first.difference(first)
self.assertEqual(len(result), 0)
self.assertEqual(result.name, first.name)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.difference, 0.5)
def test_symmetric_diff(self):
# smoke
idx1 = Index([1, 2, 3, 4], name='idx1')
idx2 = Index([2, 3, 4, 5])
result = idx1.sym_diff(idx2)
expected = Index([1, 5])
self.assertTrue(tm.equalContents(result, expected))
self.assertIsNone(result.name)
# __xor__ syntax
expected = idx1 ^ idx2
self.assertTrue(tm.equalContents(result, expected))
self.assertIsNone(result.name)
# multiIndex
idx1 = MultiIndex.from_tuples(self.tuples)
idx2 = MultiIndex.from_tuples([('foo', 1), ('bar', 3)])
result = idx1.sym_diff(idx2)
expected = MultiIndex.from_tuples([('bar', 2), ('baz', 3), ('bar', 3)])
self.assertTrue(tm.equalContents(result, expected))
# nans:
# GH #6444, sorting of nans. Make sure the number of nans is right
# and the correct non-nan values are there. punt on sorting.
idx1 = Index([1, 2, 3, np.nan])
idx2 = Index([0, 1, np.nan])
result = idx1.sym_diff(idx2)
# expected = Index([0.0, np.nan, 2.0, 3.0, np.nan])
nans = pd.isnull(result)
self.assertEqual(nans.sum(), 2)
self.assertEqual((~nans).sum(), 3)
[self.assertIn(x, result) for x in [0.0, 2.0, 3.0]]
# other not an Index:
idx1 = Index([1, 2, 3, 4], name='idx1')
idx2 = np.array([2, 3, 4, 5])
expected = Index([1, 5])
result = idx1.sym_diff(idx2)
self.assertTrue(tm.equalContents(result, expected))
self.assertEqual(result.name, 'idx1')
result = idx1.sym_diff(idx2, result_name='new_name')
self.assertTrue(tm.equalContents(result, expected))
self.assertEqual(result.name, 'new_name')
# other isn't iterable
with tm.assertRaises(TypeError):
Index(idx1,dtype='object') - 1
def test_pickle(self):
self.verify_pickle(self.strIndex)
self.strIndex.name = 'foo'
self.verify_pickle(self.strIndex)
self.verify_pickle(self.dateIndex)
def test_is_numeric(self):
self.assertFalse(self.dateIndex.is_numeric())
self.assertFalse(self.strIndex.is_numeric())
self.assertTrue(self.intIndex.is_numeric())
self.assertTrue(self.floatIndex.is_numeric())
def test_is_object(self):
self.assertTrue(self.strIndex.is_object())
self.assertTrue(self.boolIndex.is_object())
self.assertFalse(self.intIndex.is_object())
self.assertFalse(self.dateIndex.is_object())
self.assertFalse(self.floatIndex.is_object())
def test_is_all_dates(self):
self.assertTrue(self.dateIndex.is_all_dates)
self.assertFalse(self.strIndex.is_all_dates)
self.assertFalse(self.intIndex.is_all_dates)
def test_summary(self):
self._check_method_works(Index.summary)
# GH3869
ind = Index(['{other}%s', "~:{range}:0"], name='A')
result = ind.summary()
# shouldn't be formatted accidentally.
self.assertIn('~:{range}:0', result)
self.assertIn('{other}%s', result)
def test_format(self):
self._check_method_works(Index.format)
index = Index([datetime.now()])
formatted = index.format()
expected = [str(index[0])]
self.assertEqual(formatted, expected)
# 2845
index = Index([1, 2.0+3.0j, np.nan])
formatted = index.format()
expected = [str(index[0]), str(index[1]), u('NaN')]
self.assertEqual(formatted, expected)
# is this really allowed?
index = Index([1, 2.0+3.0j, None])
formatted = index.format()
expected = [str(index[0]), str(index[1]), u('NaN')]
self.assertEqual(formatted, expected)
self.strIndex[:0].format()
def test_format_with_name_time_info(self):
# bug I fixed 12/20/2011
inc = timedelta(hours=4)
dates = Index([dt + inc for dt in self.dateIndex], name='something')
formatted = dates.format(name=True)
self.assertEqual(formatted[0], 'something')
def test_format_datetime_with_time(self):
t = Index([datetime(2012, 2, 7), datetime(2012, 2, 7, 23)])
result = t.format()
expected = ['2012-02-07 00:00:00', '2012-02-07 23:00:00']
self.assertEqual(len(result), 2)
self.assertEqual(result, expected)
def test_format_none(self):
values = ['a', 'b', 'c', None]
idx = Index(values)
idx.format()
self.assertIsNone(idx[3])
def test_take(self):
indexer = [4, 3, 0, 2]
result = self.dateIndex.take(indexer)
expected = self.dateIndex[indexer]
self.assertTrue(result.equals(expected))
def _check_method_works(self, method):
method(self.empty)
method(self.dateIndex)
method(self.unicodeIndex)
method(self.strIndex)
method(self.intIndex)
method(self.tuples)
def test_get_indexer(self):
idx1 = Index([1, 2, 3, 4, 5])
idx2 = Index([2, 4, 6])
r1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, [1, 3, -1])
r1 = idx2.get_indexer(idx1, method='pad')
assert_almost_equal(r1, [-1, 0, 0, 1, 1])
rffill1 = idx2.get_indexer(idx1, method='ffill')
assert_almost_equal(r1, rffill1)
r1 = idx2.get_indexer(idx1, method='backfill')
assert_almost_equal(r1, [0, 0, 1, 1, 2])
rbfill1 = idx2.get_indexer(idx1, method='bfill')
assert_almost_equal(r1, rbfill1)
def test_slice_locs(self):
for dtype in [int, float]:
idx = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=dtype))
n = len(idx)
self.assertEqual(idx.slice_locs(start=2), (2, n))
self.assertEqual(idx.slice_locs(start=3), (3, n))
self.assertEqual(idx.slice_locs(3, 8), (3, 6))
self.assertEqual(idx.slice_locs(5, 10), (3, n))
self.assertEqual(idx.slice_locs(5.0, 10.0), (3, n))
self.assertEqual(idx.slice_locs(4.5, 10.5), (3, 8))
self.assertEqual(idx.slice_locs(end=8), (0, 6))
self.assertEqual(idx.slice_locs(end=9), (0, 7))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs(8, 2), (2, 6))
self.assertEqual(idx2.slice_locs(8.5, 1.5), (2, 6))
self.assertEqual(idx2.slice_locs(7, 3), (2, 5))
self.assertEqual(idx2.slice_locs(10.5, -1), (0, n))
def test_slice_locs_dup(self):
idx = Index(['a', 'a', 'b', 'c', 'd', 'd'])
self.assertEqual(idx.slice_locs('a', 'd'), (0, 6))
self.assertEqual(idx.slice_locs(end='d'), (0, 6))
self.assertEqual(idx.slice_locs('a', 'c'), (0, 4))
self.assertEqual(idx.slice_locs('b', 'd'), (2, 6))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs('d', 'a'), (0, 6))
self.assertEqual(idx2.slice_locs(end='a'), (0, 6))
self.assertEqual(idx2.slice_locs('d', 'b'), (0, 4))
self.assertEqual(idx2.slice_locs('c', 'a'), (2, 6))
for dtype in [int, float]:
idx = Index(np.array([10, 12, 12, 14], dtype=dtype))
self.assertEqual(idx.slice_locs(12, 12), (1, 3))
self.assertEqual(idx.slice_locs(11, 13), (1, 3))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs(12, 12), (1, 3))
self.assertEqual(idx2.slice_locs(13, 11), (1, 3))
def test_slice_locs_na(self):
idx = Index([np.nan, 1, 2])
self.assertRaises(KeyError, idx.slice_locs, start=1.5)
self.assertRaises(KeyError, idx.slice_locs, end=1.5)
self.assertEqual(idx.slice_locs(1), (1, 3))
self.assertEqual(idx.slice_locs(np.nan), (0, 3))
idx = Index([np.nan, np.nan, 1, 2])
self.assertRaises(KeyError, idx.slice_locs, np.nan)
def test_drop(self):
n = len(self.strIndex)
dropped = self.strIndex.drop(self.strIndex[lrange(5, 10)])
expected = self.strIndex[lrange(5) + lrange(10, n)]
self.assertTrue(dropped.equals(expected))
self.assertRaises(ValueError, self.strIndex.drop, ['foo', 'bar'])
dropped = self.strIndex.drop(self.strIndex[0])
expected = self.strIndex[1:]
self.assertTrue(dropped.equals(expected))
ser = Index([1, 2, 3])
dropped = ser.drop(1)
expected = Index([2, 3])
self.assertTrue(dropped.equals(expected))
def test_tuple_union_bug(self):
import pandas
import numpy as np
aidx1 = np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B')],
dtype=[('num', int), ('let', 'a1')])
aidx2 = np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B'), (1, 'C'), (2,
'C')], dtype=[('num', int), ('let', 'a1')])
idx1 = pandas.Index(aidx1)
idx2 = pandas.Index(aidx2)
# intersection broken?
int_idx = idx1.intersection(idx2)
# needs to be 1d like idx1 and idx2
expected = idx1[:4] # pandas.Index(sorted(set(idx1) & set(idx2)))
self.assertEqual(int_idx.ndim, 1)
self.assertTrue(int_idx.equals(expected))
# union broken
union_idx = idx1.union(idx2)
expected = idx2
self.assertEqual(union_idx.ndim, 1)
self.assertTrue(union_idx.equals(expected))
def test_is_monotonic_incomparable(self):
index = Index([5, datetime.now(), 7])
self.assertFalse(index.is_monotonic)
self.assertFalse(index.is_monotonic_decreasing)
def test_get_set_value(self):
values = np.random.randn(100)
date = self.dateIndex[67]
assert_almost_equal(self.dateIndex.get_value(values, date),
values[67])
self.dateIndex.set_value(values, date, 10)
self.assertEqual(values[67], 10)
def test_isin(self):
values = ['foo', 'bar', 'quux']
idx = Index(['qux', 'baz', 'foo', 'bar'])
result = idx.isin(values)
expected = np.array([False, False, True, True])
self.assert_numpy_array_equal(result, expected)
# empty, return dtype bool
idx = Index([])
result = idx.isin(values)
self.assertEqual(len(result), 0)
self.assertEqual(result.dtype, np.bool_)
def test_isin_nan(self):
self.assert_numpy_array_equal(
Index(['a', np.nan]).isin([np.nan]), [False, True])
self.assert_numpy_array_equal(
Index(['a', pd.NaT]).isin([pd.NaT]), [False, True])
self.assert_numpy_array_equal(
Index(['a', np.nan]).isin([float('nan')]), [False, False])
self.assert_numpy_array_equal(
Index(['a', np.nan]).isin([pd.NaT]), [False, False])
# Float64Index overrides isin, so must be checked separately
self.assert_numpy_array_equal(
Float64Index([1.0, np.nan]).isin([np.nan]), [False, True])
self.assert_numpy_array_equal(
Float64Index([1.0, np.nan]).isin([float('nan')]), [False, True])
self.assert_numpy_array_equal(
Float64Index([1.0, np.nan]).isin([pd.NaT]), [False, True])
def test_isin_level_kwarg(self):
def check_idx(idx):
values = idx.tolist()[-2:] + ['nonexisting']
expected = np.array([False, False, True, True])
self.assert_numpy_array_equal(expected, idx.isin(values, level=0))
self.assert_numpy_array_equal(expected, idx.isin(values, level=-1))
self.assertRaises(IndexError, idx.isin, values, level=1)
self.assertRaises(IndexError, idx.isin, values, level=10)
self.assertRaises(IndexError, idx.isin, values, level=-2)
self.assertRaises(KeyError, idx.isin, values, level=1.0)
self.assertRaises(KeyError, idx.isin, values, level='foobar')
idx.name = 'foobar'
self.assert_numpy_array_equal(expected,
idx.isin(values, level='foobar'))
self.assertRaises(KeyError, idx.isin, values, level='xyzzy')
self.assertRaises(KeyError, idx.isin, values, level=np.nan)
check_idx(Index(['qux', 'baz', 'foo', 'bar']))
# Float64Index overrides isin, so must be checked separately
check_idx(Float64Index([1.0, 2.0, 3.0, 4.0]))
def test_boolean_cmp(self):
values = [1, 2, 3, 4]
idx = Index(values)
res = (idx == values)
self.assert_numpy_array_equal(res,np.array([True,True,True,True],dtype=bool))
def test_get_level_values(self):
result = self.strIndex.get_level_values(0)
self.assertTrue(result.equals(self.strIndex))
def test_slice_keep_name(self):
idx = Index(['a', 'b'], name='asdf')
self.assertEqual(idx.name, idx[1:].name)
def test_join_self(self):
# instance attributes of the form self.<name>Index
indices = 'unicode', 'str', 'date', 'int', 'float'
kinds = 'outer', 'inner', 'left', 'right'
for index_kind in indices:
res = getattr(self, '{0}Index'.format(index_kind))
for kind in kinds:
joined = res.join(res, how=kind)
self.assertIs(res, joined)
def test_indexing_doesnt_change_class(self):
idx = Index([1, 2, 3, 'a', 'b', 'c'])
self.assertTrue(idx[1:3].identical(
pd.Index([2, 3], dtype=np.object_)))
self.assertTrue(idx[[0,1]].identical(
pd.Index([1, 2], dtype=np.object_)))
def test_outer_join_sort(self):
left_idx = Index(np.random.permutation(15))
right_idx = tm.makeDateIndex(10)
with tm.assert_produces_warning(RuntimeWarning):
joined = left_idx.join(right_idx, how='outer')
# right_idx in this case because DatetimeIndex has join precedence over
# Int64Index
expected = right_idx.astype(object).union(left_idx.astype(object))
tm.assert_index_equal(joined, expected)
def test_nan_first_take_datetime(self):
idx = Index([pd.NaT, Timestamp('20130101'), Timestamp('20130102')])
res = idx.take([-1, 0, 1])
exp = Index([idx[-1], idx[0], idx[1]])
tm.assert_index_equal(res, exp)
def test_reindex_preserves_name_if_target_is_list_or_ndarray(self):
# GH6552
idx = pd.Index([0, 1, 2])
dt_idx = pd.date_range('20130101', periods=3)
idx.name = None
self.assertEqual(idx.reindex([])[0].name, None)
self.assertEqual(idx.reindex(np.array([]))[0].name, None)
self.assertEqual(idx.reindex(idx.tolist())[0].name, None)
self.assertEqual(idx.reindex(idx.tolist()[:-1])[0].name, None)
self.assertEqual(idx.reindex(idx.values)[0].name, None)
self.assertEqual(idx.reindex(idx.values[:-1])[0].name, None)
# Must preserve name even if dtype changes.
self.assertEqual(idx.reindex(dt_idx.values)[0].name, None)
self.assertEqual(idx.reindex(dt_idx.tolist())[0].name, None)
idx.name = 'foobar'
self.assertEqual(idx.reindex([])[0].name, 'foobar')
self.assertEqual(idx.reindex(np.array([]))[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.tolist())[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.tolist()[:-1])[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.values)[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.values[:-1])[0].name, 'foobar')
# Must preserve name even if dtype changes.
self.assertEqual(idx.reindex(dt_idx.values)[0].name, 'foobar')
self.assertEqual(idx.reindex(dt_idx.tolist())[0].name, 'foobar')
def test_reindex_preserves_type_if_target_is_empty_list_or_array(self):
# GH7774
idx = pd.Index(list('abc'))
def get_reindex_type(target):
return idx.reindex(target)[0].dtype.type
self.assertEqual(get_reindex_type([]), np.object_)
self.assertEqual(get_reindex_type(np.array([])), np.object_)
self.assertEqual(get_reindex_type(np.array([], dtype=np.int64)),
np.object_)
def test_reindex_doesnt_preserve_type_if_target_is_empty_index(self):
# GH7774
idx = pd.Index(list('abc'))
def get_reindex_type(target):
return idx.reindex(target)[0].dtype.type
self.assertEqual(get_reindex_type(pd.Int64Index([])), np.int64)
self.assertEqual(get_reindex_type(pd.Float64Index([])), np.float64)
self.assertEqual(get_reindex_type(pd.DatetimeIndex([])), np.datetime64)
reindexed = idx.reindex(pd.MultiIndex([pd.Int64Index([]),
pd.Float64Index([])],
[[], []]))[0]
self.assertEqual(reindexed.levels[0].dtype.type, np.int64)
self.assertEqual(reindexed.levels[1].dtype.type, np.float64)
class Numeric(Base):
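# Shared arithmetic / ufunc compatibility tests for the numeric index subclasses
# (used by TestFloat64Index and TestInt64Index below).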
def test_numeric_compat(self):
idx = self._holder(np.arange(5,dtype='int64'))
didx = self._holder(np.arange(5,dtype='int64')**2)
result = idx * 1
tm.assert_index_equal(result, idx)
result = 1 * idx
tm.assert_index_equal(result, idx)
result = idx * idx
tm.assert_index_equal(result, didx)
result = idx / 1
tm.assert_index_equal(result, idx)
result = idx // 1
tm.assert_index_equal(result, idx)
result = idx * np.array(5,dtype='int64')
tm.assert_index_equal(result, self._holder(np.arange(5,dtype='int64')*5))
result = idx * np.arange(5,dtype='int64')
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5,dtype='int64'))
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5,dtype='float64')+0.1)
tm.assert_index_equal(result,
Float64Index(np.arange(5,dtype='float64')*(np.arange(5,dtype='float64')+0.1)))
# invalid
self.assertRaises(TypeError, lambda : idx * date_range('20130101',periods=5))
self.assertRaises(ValueError, lambda : idx * self._holder(np.arange(3)))
self.assertRaises(ValueError, lambda : idx * np.array([1,2]))
def test_explicit_conversions(self):
# GH 8608
# add/sub are overridden explicitly for Float/Int Index
idx = self._holder(np.arange(5,dtype='int64'))
# float conversions
arr = np.arange(5,dtype='int64')*3.2
expected = Float64Index(arr)
fidx = idx * 3.2
tm.assert_index_equal(fidx,expected)
fidx = 3.2 * idx
tm.assert_index_equal(fidx,expected)
# interops with numpy arrays
expected = Float64Index(arr)
a = np.zeros(5,dtype='float64')
result = fidx - a
tm.assert_index_equal(result,expected)
expected = Float64Index(-arr)
a = np.zeros(5,dtype='float64')
result = a - fidx
tm.assert_index_equal(result,expected)
def test_ufunc_compat(self):
idx = self._holder(np.arange(5,dtype='int64'))
result = np.sin(idx)
expected = Float64Index(np.sin(np.arange(5,dtype='int64')))
tm.assert_index_equal(result, expected)
class TestFloat64Index(Numeric, tm.TestCase):
_holder = Float64Index
_multiprocess_can_split_ = True
def setUp(self):
self.mixed = Float64Index([1.5, 2, 3, 4, 5])
self.float = Float64Index(np.arange(5) * 2.5)
def create_index(self):
return Float64Index(np.arange(5,dtype='float64'))
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.float).__name__):
hash(self.float)
def test_repr_roundtrip(self):
for ind in (self.mixed, self.float):
tm.assert_index_equal(eval(repr(ind)), ind)
def check_is_index(self, i):
self.assertIsInstance(i, Index)
self.assertNotIsInstance(i, Float64Index)
def check_coerce(self, a, b, is_float_index=True):
self.assertTrue(a.equals(b))
if is_float_index:
self.assertIsInstance(b, Float64Index)
else:
self.check_is_index(b)
def test_constructor(self):
# explicit construction
index = Float64Index([1,2,3,4,5])
self.assertIsInstance(index, Float64Index)
self.assertTrue((index.values == np.array([1,2,3,4,5],dtype='float64')).all())
index = Float64Index(np.array([1,2,3,4,5]))
self.assertIsInstance(index, Float64Index)
index = Float64Index([1.,2,3,4,5])
self.assertIsInstance(index, Float64Index)
index = Float64Index(np.array([1.,2,3,4,5]))
self.assertIsInstance(index, Float64Index)
self.assertEqual(index.dtype, float)
index = Float64Index(np.array([1.,2,3,4,5]),dtype=np.float32)
self.assertIsInstance(index, Float64Index)
self.assertEqual(index.dtype, np.float64)
index = Float64Index(np.array([1,2,3,4,5]),dtype=np.float32)
self.assertIsInstance(index, Float64Index)
self.assertEqual(index.dtype, np.float64)
# nan handling
result = Float64Index([np.nan, np.nan])
self.assertTrue(pd.isnull(result.values).all())
result = Float64Index(np.array([np.nan]))
self.assertTrue(pd.isnull(result.values).all())
result = Index(np.array([np.nan]))
self.assertTrue(pd.isnull(result.values).all())
def test_constructor_invalid(self):
# invalid
self.assertRaises(TypeError, Float64Index, 0.)
self.assertRaises(TypeError, Float64Index, ['a','b',0.])
self.assertRaises(TypeError, Float64Index, [Timestamp('20130101')])
def test_constructor_coerce(self):
self.check_coerce(self.mixed,Index([1.5, 2, 3, 4, 5]))
self.check_coerce(self.float,Index(np.arange(5) * 2.5))
self.check_coerce(self.float,Index(np.array(np.arange(5) * 2.5, dtype=object)))
def test_constructor_explicit(self):
# these don't auto convert
self.check_coerce(self.float,Index((np.arange(5) * 2.5), dtype=object),
is_float_index=False)
self.check_coerce(self.mixed,Index([1.5, 2, 3, 4, 5],dtype=object),
is_float_index=False)
def test_astype(self):
result = self.float.astype(object)
self.assertTrue(result.equals(self.float))
self.assertTrue(self.float.equals(result))
self.check_is_index(result)
i = self.mixed.copy()
i.name = 'foo'
result = i.astype(object)
self.assertTrue(result.equals(i))
self.assertTrue(i.equals(result))
self.check_is_index(result)
def test_equals(self):
i = Float64Index([1.0,2.0])
self.assertTrue(i.equals(i))
self.assertTrue(i.identical(i))
i2 = Float64Index([1.0,2.0])
self.assertTrue(i.equals(i2))
i = Float64Index([1.0,np.nan])
self.assertTrue(i.equals(i))
self.assertTrue(i.identical(i))
i2 = Float64Index([1.0,np.nan])
self.assertTrue(i.equals(i2))
def test_get_loc_na(self):
idx = Float64Index([np.nan, 1, 2])
self.assertEqual(idx.get_loc(1), 1)
self.assertEqual(idx.get_loc(np.nan), 0)
idx = Float64Index([np.nan, 1, np.nan])
self.assertEqual(idx.get_loc(1), 1)
self.assertRaises(KeyError, idx.slice_locs, np.nan)
def test_contains_nans(self):
i = Float64Index([1.0, 2.0, np.nan])
self.assertTrue(np.nan in i)
def test_contains_not_nans(self):
i = Float64Index([1.0, 2.0, np.nan])
self.assertTrue(1.0 in i)
def test_doesnt_contain_all_the_things(self):
i = Float64Index([np.nan])
self.assertFalse(i.isin([0]).item())
self.assertFalse(i.isin([1]).item())
self.assertTrue(i.isin([np.nan]).item())
def test_nan_multiple_containment(self):
i = Float64Index([1.0, np.nan])
np.testing.assert_array_equal(i.isin([1.0]), np.array([True, False]))
np.testing.assert_array_equal(i.isin([2.0, np.pi]),
np.array([False, False]))
np.testing.assert_array_equal(i.isin([np.nan]),
np.array([False, True]))
np.testing.assert_array_equal(i.isin([1.0, np.nan]),
np.array([True, True]))
i = Float64Index([1.0, 2.0])
np.testing.assert_array_equal(i.isin([np.nan]),
np.array([False, False]))
def test_astype_from_object(self):
index = Index([1.0, np.nan, 0.2], dtype='object')
result = index.astype(float)
expected = Float64Index([1.0, np.nan, 0.2])
tm.assert_equal(result.dtype, expected.dtype)
tm.assert_index_equal(result, expected)
class TestInt64Index(Numeric, tm.TestCase):
_holder = Int64Index
_multiprocess_can_split_ = True
def setUp(self):
self.index = Int64Index(np.arange(0, 20, 2))
def create_index(self):
return Int64Index(np.arange(5,dtype='int64'))
def test_too_many_names(self):
def testit():
self.index.names = ["roger", "harold"]
assertRaisesRegexp(ValueError, "^Length", testit)
def test_constructor(self):
# pass list, coerce fine
index = Int64Index([-5, 0, 1, 2])
expected = np.array([-5, 0, 1, 2], dtype=np.int64)
self.assert_numpy_array_equal(index, expected)
# from iterable
index = Int64Index(iter([-5, 0, 1, 2]))
self.assert_numpy_array_equal(index, expected)
# scalar raise Exception
self.assertRaises(TypeError, Int64Index, 5)
# copy
arr = self.index.values
new_index = Int64Index(arr, copy=True)
self.assert_numpy_array_equal(new_index, self.index)
val = arr[0] + 3000
# this should not change index
arr[0] = val
self.assertNotEqual(new_index[0], val)
def test_constructor_corner(self):
arr = np.array([1, 2, 3, 4], dtype=object)
index = Int64Index(arr)
self.assertEqual(index.values.dtype, np.int64)
self.assertTrue(index.equals(arr))
# preventing casting
arr = np.array([1, '2', 3, '4'], dtype=object)
with tm.assertRaisesRegexp(TypeError, 'casting'):
Int64Index(arr)
arr_with_floats = [0, 2, 3, 4, 5, 1.25, 3, -1]
with tm.assertRaisesRegexp(TypeError, 'casting'):
Int64Index(arr_with_floats)
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.index).__name__):
hash(self.index)
def test_copy(self):
i = Int64Index([], name='Foo')
i_copy = i.copy()
self.assertEqual(i_copy.name, 'Foo')
def test_view(self):
i = Int64Index([], name='Foo')
i_view = i.view()
self.assertEqual(i_view.name, 'Foo')
def test_coerce_list(self):
# coerce things
arr = Index([1, 2, 3, 4])
tm.assert_isinstance(arr, Int64Index)
# but not if explicit dtype passed
arr = Index([1, 2, 3, 4], dtype=object)
tm.assert_isinstance(arr, Index)
def test_dtype(self):
self.assertEqual(self.index.dtype, np.int64)
def test_is_monotonic(self):
self.assertTrue(self.index.is_monotonic)
self.assertTrue(self.index.is_monotonic_increasing)
self.assertFalse(self.index.is_monotonic_decreasing)
index = Int64Index([4, 3, 2, 1])
self.assertFalse(index.is_monotonic)
self.assertTrue(index.is_monotonic_decreasing)
index = Int64Index([1])
self.assertTrue(index.is_monotonic)
self.assertTrue(index.is_monotonic_increasing)
self.assertTrue(index.is_monotonic_decreasing)
def test_is_monotonic_na(self):
examples = [Index([np.nan]),
Index([np.nan, 1]),
Index([1, 2, np.nan]),
Index(['a', 'b', np.nan]),
pd.to_datetime(['NaT']),
pd.to_datetime(['NaT', '2000-01-01']),
pd.to_datetime(['2000-01-01', 'NaT', '2000-01-02']),
pd.to_timedelta(['1 day', 'NaT']),
]
for index in examples:
self.assertFalse(index.is_monotonic_increasing)
self.assertFalse(index.is_monotonic_decreasing)
def test_equals(self):
same_values = Index(self.index, dtype=object)
self.assertTrue(self.index.equals(same_values))
self.assertTrue(same_values.equals(self.index))
def test_identical(self):
i = Index(self.index.copy())
self.assertTrue(i.identical(self.index))
same_values_different_type = Index(i, dtype=object)
self.assertFalse(i.identical(same_values_different_type))
i = self.index.copy(dtype=object)
i = i.rename('foo')
same_values = Index(i, dtype=object)
self.assertTrue(same_values.identical(self.index.copy(dtype=object)))
self.assertFalse(i.identical(self.index))
self.assertTrue(Index(same_values, name='foo', dtype=object
).identical(i))
self.assertFalse(
self.index.copy(dtype=object)
.identical(self.index.copy(dtype='int64')))
def test_get_indexer(self):
target = Int64Index(np.arange(10))
indexer = self.index.get_indexer(target)
expected = np.array([0, -1, 1, -1, 2, -1, 3, -1, 4, -1])
self.assert_numpy_array_equal(indexer, expected)
def test_get_indexer_pad(self):
target = Int64Index(np.arange(10))
indexer = self.index.get_indexer(target, method='pad')
expected = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4])
self.assert_numpy_array_equal(indexer, expected)
def test_get_indexer_backfill(self):
target = Int64Index(np.arange(10))
indexer = self.index.get_indexer(target, method='backfill')
expected = np.array([0, 1, 1, 2, 2, 3, 3, 4, 4, 5])
self.assert_numpy_array_equal(indexer, expected)
def test_join_outer(self):
other = Int64Index([7, 12, 25, 1, 2, 5])
other_mono = Int64Index([1, 2, 5, 7, 12, 25])
# not monotonic
# guarantee of sortedness
res, lidx, ridx = self.index.join(other, how='outer',
return_indexers=True)
noidx_res = self.index.join(other, how='outer')
self.assertTrue(res.equals(noidx_res))
eres = Int64Index([0, 1, 2, 4, 5, 6, 7, 8, 10, 12, 14, 16, 18, 25])
elidx = np.array([0, -1, 1, 2, -1, 3, -1, 4, 5, 6, 7, 8, 9, -1],
dtype=np.int64)
eridx = np.array([-1, 3, 4, -1, 5, -1, 0, -1, -1, 1, -1, -1, -1, 2],
dtype=np.int64)
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
# monotonic
res, lidx, ridx = self.index.join(other_mono, how='outer',
return_indexers=True)
noidx_res = self.index.join(other_mono, how='outer')
self.assertTrue(res.equals(noidx_res))
eridx = np.array([-1, 0, 1, -1, 2, -1, 3, -1, -1, 4, -1, -1, -1, 5],
dtype=np.int64)
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
def test_join_inner(self):
other = Int64Index([7, 12, 25, 1, 2, 5])
other_mono = Int64Index([1, 2, 5, 7, 12, 25])
# not monotonic
res, lidx, ridx = self.index.join(other, how='inner',
return_indexers=True)
# no guarantee of sortedness, so sort for comparison purposes
ind = res.argsort()
res = res.take(ind)
lidx = lidx.take(ind)
ridx = ridx.take(ind)
eres = Int64Index([2, 12])
elidx = np.array([1, 6])
eridx = np.array([4, 1])
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
# monotonic
res, lidx, ridx = self.index.join(other_mono, how='inner',
return_indexers=True)
res2 = self.index.intersection(other_mono)
self.assertTrue(res.equals(res2))
eridx = np.array([1, 4])
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
def test_join_left(self):
other = Int64Index([7, 12, 25, 1, 2, 5])
other_mono = Int64Index([1, 2, 5, 7, 12, 25])
# not monotonic
res, lidx, ridx = self.index.join(other, how='left',
return_indexers=True)
eres = self.index
eridx = np.array([-1, 4, -1, -1, -1, -1, 1, -1, -1, -1],
dtype=np.int64)
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assertIsNone(lidx)
self.assert_numpy_array_equal(ridx, eridx)
# monotonic
res, lidx, ridx = self.index.join(other_mono, how='left',
return_indexers=True)
eridx = np.array([-1, 1, -1, -1, -1, -1, 4, -1, -1, -1],
dtype=np.int64)
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assertIsNone(lidx)
self.assert_numpy_array_equal(ridx, eridx)
# non-unique
"""
idx = Index([1,1,2,5])
idx2 = Index([1,2,5,7,9])
res, lidx, ridx = idx2.join(idx, how='left', return_indexers=True)
eres = idx2
eridx = np.array([0, 2, 3, -1, -1])
elidx = np.array([0, 1, 2, 3, 4])
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
"""
def test_join_right(self):
other = Int64Index([7, 12, 25, 1, 2, 5])
other_mono = Int64Index([1, 2, 5, 7, 12, 25])
# not monotonic
res, lidx, ridx = self.index.join(other, how='right',
return_indexers=True)
eres = other
elidx = np.array([-1, 6, -1, -1, 1, -1],
dtype=np.int64)
tm.assert_isinstance(other, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assertIsNone(ridx)
# monotonic
res, lidx, ridx = self.index.join(other_mono, how='right',
return_indexers=True)
eres = other_mono
elidx = np.array([-1, 1, -1, -1, 6, -1],
dtype=np.int64)
tm.assert_isinstance(other, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assertIsNone(ridx)
# non-unique
"""
idx = Index([1,1,2,5])
idx2 = Index([1,2,5,7,9])
res, lidx, ridx = idx.join(idx2, how='right', return_indexers=True)
eres = idx2
elidx = np.array([0, 2, 3, -1, -1])
eridx = np.array([0, 1, 2, 3, 4])
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
idx = Index([1,1,2,5])
idx2 = Index([1,2,5,9,7])
res = idx.join(idx2, how='right', return_indexers=False)
eres = idx2
        self.assertTrue(res.equals(eres))
"""
def test_join_non_int_index(self):
other = Index([3, 6, 7, 8, 10], dtype=object)
outer = self.index.join(other, how='outer')
outer2 = other.join(self.index, how='outer')
expected = Index([0, 2, 3, 4, 6, 7, 8, 10, 12, 14,
16, 18], dtype=object)
self.assertTrue(outer.equals(outer2))
self.assertTrue(outer.equals(expected))
inner = self.index.join(other, how='inner')
inner2 = other.join(self.index, how='inner')
expected = Index([6, 8, 10], dtype=object)
self.assertTrue(inner.equals(inner2))
self.assertTrue(inner.equals(expected))
left = self.index.join(other, how='left')
self.assertTrue(left.equals(self.index))
left2 = other.join(self.index, how='left')
self.assertTrue(left2.equals(other))
right = self.index.join(other, how='right')
self.assertTrue(right.equals(other))
right2 = other.join(self.index, how='right')
self.assertTrue(right2.equals(self.index))
def test_join_non_unique(self):
left = Index([4, 4, 3, 3])
joined, lidx, ridx = left.join(left, return_indexers=True)
exp_joined = Index([3, 3, 3, 3, 4, 4, 4, 4])
self.assertTrue(joined.equals(exp_joined))
exp_lidx = np.array([2, 2, 3, 3, 0, 0, 1, 1], dtype=np.int64)
self.assert_numpy_array_equal(lidx, exp_lidx)
exp_ridx = np.array([2, 3, 2, 3, 0, 1, 0, 1], dtype=np.int64)
self.assert_numpy_array_equal(ridx, exp_ridx)
def test_join_self(self):
kinds = 'outer', 'inner', 'left', 'right'
for kind in kinds:
joined = self.index.join(self.index, how=kind)
self.assertIs(self.index, joined)
def test_intersection(self):
other = Index([1, 2, 3, 4, 5])
result = self.index.intersection(other)
expected = np.sort(np.intersect1d(self.index.values, other.values))
self.assert_numpy_array_equal(result, expected)
result = other.intersection(self.index)
expected = np.sort(np.asarray(np.intersect1d(self.index.values,
other.values)))
self.assert_numpy_array_equal(result, expected)
def test_intersect_str_dates(self):
dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)]
i1 = Index(dt_dates, dtype=object)
i2 = Index(['aa'], dtype=object)
res = i2.intersection(i1)
self.assertEqual(len(res), 0)
def test_union_noncomparable(self):
from datetime import datetime, timedelta
# corner case, non-Int64Index
now = datetime.now()
other = Index([now + timedelta(i) for i in range(4)], dtype=object)
result = self.index.union(other)
expected = np.concatenate((self.index, other))
self.assert_numpy_array_equal(result, expected)
result = other.union(self.index)
expected = np.concatenate((other, self.index))
self.assert_numpy_array_equal(result, expected)
def test_cant_or_shouldnt_cast(self):
# can't
data = ['foo', 'bar', 'baz']
self.assertRaises(TypeError, Int64Index, data)
# shouldn't
data = ['0', '1', '2']
self.assertRaises(TypeError, Int64Index, data)
def test_view_Index(self):
self.index.view(Index)
def test_prevent_casting(self):
result = self.index.astype('O')
self.assertEqual(result.dtype, np.object_)
def test_take_preserve_name(self):
index = Int64Index([1, 2, 3, 4], name='foo')
taken = index.take([3, 0, 1])
self.assertEqual(index.name, taken.name)
def test_int_name_format(self):
from pandas import Series, DataFrame
index = Index(['a', 'b', 'c'], name=0)
s = Series(lrange(3), index)
df = DataFrame(lrange(3), index=index)
repr(s)
repr(df)
def test_print_unicode_columns(self):
df = pd.DataFrame(
{u("\u05d0"): [1, 2, 3], "\u05d1": [4, 5, 6], "c": [7, 8, 9]})
repr(df.columns) # should not raise UnicodeDecodeError
def test_repr_summary(self):
with cf.option_context('display.max_seq_items', 10):
r = repr(pd.Index(np.arange(1000)))
self.assertTrue(len(r) < 100)
self.assertTrue("..." in r)
def test_repr_roundtrip(self):
tm.assert_index_equal(eval(repr(self.index)), self.index)
def test_unicode_string_with_unicode(self):
idx = Index(lrange(1000))
if compat.PY3:
str(idx)
else:
compat.text_type(idx)
def test_bytestring_with_unicode(self):
idx = Index(lrange(1000))
if compat.PY3:
bytes(idx)
else:
str(idx)
def test_slice_keep_name(self):
idx = Int64Index([1, 2], name='asdf')
self.assertEqual(idx.name, idx[1:].name)
class TestDatetimeIndex(Base, tm.TestCase):
_holder = DatetimeIndex
_multiprocess_can_split_ = True
def create_index(self):
return date_range('20130101',periods=5)
def test_pickle_compat_construction(self):
pass
def test_numeric_compat(self):
super(TestDatetimeIndex, self).test_numeric_compat()
if not compat.PY3_2:
for f in [lambda : np.timedelta64(1, 'D').astype('m8[ns]') * pd.date_range('2000-01-01', periods=3),
lambda : pd.date_range('2000-01-01', periods=3) * np.timedelta64(1, 'D').astype('m8[ns]') ]:
self.assertRaises(TypeError, f)
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
index=date_range('20130101',periods=3,tz='US/Eastern',name='foo')
unpickled = self.round_trip_pickle(index)
self.assertTrue(index.equals(unpickled))
def test_reindex_preserves_tz_if_target_is_empty_list_or_array(self):
# GH7774
index = date_range('20130101', periods=3, tz='US/Eastern')
self.assertEqual(str(index.reindex([])[0].tz), 'US/Eastern')
self.assertEqual(str(index.reindex(np.array([]))[0].tz), 'US/Eastern')
class TestPeriodIndex(Base, tm.TestCase):
_holder = PeriodIndex
_multiprocess_can_split_ = True
def create_index(self):
return period_range('20130101',periods=5,freq='D')
def test_pickle_compat_construction(self):
pass
class TestTimedeltaIndex(Base, tm.TestCase):
_holder = TimedeltaIndex
_multiprocess_can_split_ = True
def create_index(self):
return pd.to_timedelta(range(5),unit='d') + pd.offsets.Hour(1)
def test_numeric_compat(self):
idx = self._holder(np.arange(5,dtype='int64'))
        didx = self._holder(np.arange(5,dtype='int64')**2)
result = idx * 1
tm.assert_index_equal(result, idx)
result = 1 * idx
tm.assert_index_equal(result, idx)
result = idx / 1
tm.assert_index_equal(result, idx)
result = idx // 1
tm.assert_index_equal(result, idx)
result = idx * np.array(5,dtype='int64')
tm.assert_index_equal(result, self._holder(np.arange(5,dtype='int64')*5))
result = idx * np.arange(5,dtype='int64')
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5,dtype='int64'))
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5,dtype='float64')+0.1)
tm.assert_index_equal(result,
Float64Index(np.arange(5,dtype='float64')*(np.arange(5,dtype='float64')+0.1)))
# invalid
self.assertRaises(TypeError, lambda : idx * idx)
self.assertRaises(ValueError, lambda : idx * self._holder(np.arange(3)))
self.assertRaises(ValueError, lambda : idx * np.array([1,2]))
def test_pickle_compat_construction(self):
pass
class TestMultiIndex(Base, tm.TestCase):
_holder = MultiIndex
_multiprocess_can_split_ = True
_compat_props = ['shape', 'ndim', 'size', 'itemsize']
def setUp(self):
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
self.index_names = ['first', 'second']
self.index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=self.index_names, verify_integrity=False)
def create_index(self):
return self.index
def test_boolean_context_compat2(self):
# boolean context compat
# GH7897
i1 = MultiIndex.from_tuples([('A', 1), ('A', 2)])
i2 = MultiIndex.from_tuples([('A', 1), ('A', 3)])
common = i1.intersection(i2)
def f():
if common:
pass
tm.assertRaisesRegexp(ValueError,'The truth value of a',f)
def test_labels_dtypes(self):
# GH 8456
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
self.assertTrue(i.labels[0].dtype == 'int8')
self.assertTrue(i.labels[1].dtype == 'int8')
i = MultiIndex.from_product([['a'],range(40)])
self.assertTrue(i.labels[1].dtype == 'int8')
i = MultiIndex.from_product([['a'],range(400)])
self.assertTrue(i.labels[1].dtype == 'int16')
i = MultiIndex.from_product([['a'],range(40000)])
self.assertTrue(i.labels[1].dtype == 'int32')
i = pd.MultiIndex.from_product([['a'],range(1000)])
self.assertTrue((i.labels[0]>=0).all())
self.assertTrue((i.labels[1]>=0).all())
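        # (Descriptive note: the assertions above exercise the label storage using the
        # smallest integer dtype that fits the level size -- int8 for 40 categories,
        # int16 for 400, int32 for 40000 -- and the label codes staying non-negative.)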
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.index).__name__):
hash(self.index)
def test_set_names_and_rename(self):
# so long as these are synonyms, we don't need to test set_names
self.assertEqual(self.index.rename, self.index.set_names)
new_names = [name + "SUFFIX" for name in self.index_names]
ind = self.index.set_names(new_names)
self.assertEqual(self.index.names, self.index_names)
self.assertEqual(ind.names, new_names)
with assertRaisesRegexp(ValueError, "^Length"):
ind.set_names(new_names + new_names)
new_names2 = [name + "SUFFIX2" for name in new_names]
res = ind.set_names(new_names2, inplace=True)
self.assertIsNone(res)
self.assertEqual(ind.names, new_names2)
# set names for specific level (# GH7792)
ind = self.index.set_names(new_names[0], level=0)
self.assertEqual(self.index.names, self.index_names)
self.assertEqual(ind.names, [new_names[0], self.index_names[1]])
res = ind.set_names(new_names2[0], level=0, inplace=True)
self.assertIsNone(res)
self.assertEqual(ind.names, [new_names2[0], self.index_names[1]])
# set names for multiple levels
ind = self.index.set_names(new_names, level=[0, 1])
self.assertEqual(self.index.names, self.index_names)
self.assertEqual(ind.names, new_names)
res = ind.set_names(new_names2, level=[0, 1], inplace=True)
self.assertIsNone(res)
self.assertEqual(ind.names, new_names2)
def test_set_levels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels, labels = self.index.levels, self.index.labels
new_levels = [[lev + 'a' for lev in level] for level in levels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
self.assertEqual(len(actual), len(expected))
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
assert_almost_equal(act, exp)
# level changing [w/o mutation]
ind2 = self.index.set_levels(new_levels)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.levels, new_levels)
# level changing specific level [w/o mutation]
ind2 = self.index.set_levels(new_levels[0], level=0)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.set_levels(new_levels[1], level=1)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/o mutation]
ind2 = self.index.set_levels(new_levels, level=[0, 1])
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, level=[0, 1], inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
def test_set_labels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels, labels = self.index.levels, self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
self.assertEqual(len(actual), len(expected))
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
assert_almost_equal(act, exp)
# label changing [w/o mutation]
ind2 = self.index.set_labels(new_labels)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.labels, new_labels)
# label changing specific level [w/o mutation]
ind2 = self.index.set_labels(new_labels[0], level=0)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.set_labels(new_labels[1], level=1)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/o mutation]
ind2 = self.index.set_labels(new_labels, level=[0, 1])
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[0], level=0, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[1], level=1, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, level=[0, 1], inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
def test_set_levels_labels_names_bad_input(self):
levels, labels = self.index.levels, self.index.labels
names = self.index.names
with tm.assertRaisesRegexp(ValueError, 'Length of levels'):
self.index.set_levels([levels[0]])
with tm.assertRaisesRegexp(ValueError, 'Length of labels'):
self.index.set_labels([labels[0]])
with tm.assertRaisesRegexp(ValueError, 'Length of names'):
self.index.set_names([names[0]])
        # scalar input shouldn't raise a generic error; it should demand list-like input
        with tm.assertRaisesRegexp(TypeError, 'list of lists-like'):
            self.index.set_levels(levels[0])
        # scalar input shouldn't raise a generic error; it should demand list-like input
        with tm.assertRaisesRegexp(TypeError, 'list of lists-like'):
            self.index.set_labels(labels[0])
        # scalar input shouldn't raise a generic error; it should demand list-like input
        with tm.assertRaisesRegexp(TypeError, 'list-like'):
            self.index.set_names(names[0])
# should have equal lengths
with tm.assertRaisesRegexp(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0], level=[0, 1])
with tm.assertRaisesRegexp(TypeError, 'list-like'):
self.index.set_levels(levels, level=0)
# should have equal lengths
with tm.assertRaisesRegexp(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0], level=[0, 1])
with tm.assertRaisesRegexp(TypeError, 'list-like'):
self.index.set_labels(labels, level=0)
# should have equal lengths
with tm.assertRaisesRegexp(ValueError, 'Length of names'):
self.index.set_names(names[0], level=[0, 1])
with tm.assertRaisesRegexp(TypeError, 'string'):
self.index.set_names(names, level=0)
def test_metadata_immutable(self):
levels, labels = self.index.levels, self.index.labels
# shouldn't be able to set at either the top level or base level
mutable_regex = re.compile('does not support mutable operations')
with assertRaisesRegexp(TypeError, mutable_regex):
levels[0] = levels[0]
with assertRaisesRegexp(TypeError, mutable_regex):
levels[0][0] = levels[0][0]
# ditto for labels
with assertRaisesRegexp(TypeError, mutable_regex):
labels[0] = labels[0]
with assertRaisesRegexp(TypeError, mutable_regex):
labels[0][0] = labels[0][0]
# and for names
names = self.index.names
with assertRaisesRegexp(TypeError, mutable_regex):
names[0] = names[0]
def test_inplace_mutation_resets_values(self):
levels = [['a', 'b', 'c'], [4]]
levels2 = [[1, 2, 3], ['a']]
labels = [[0, 1, 0, 2, 2, 0], [0, 0, 0, 0, 0, 0]]
mi1 = MultiIndex(levels=levels, labels=labels)
mi2 = MultiIndex(levels=levels2, labels=labels)
vals = mi1.values.copy()
vals2 = mi2.values.copy()
self.assertIsNotNone(mi1._tuples)
# make sure level setting works
new_vals = mi1.set_levels(levels2).values
assert_almost_equal(vals2, new_vals)
# non-inplace doesn't kill _tuples [implementation detail]
assert_almost_equal(mi1._tuples, vals)
# and values is still same too
assert_almost_equal(mi1.values, vals)
# inplace should kill _tuples
mi1.set_levels(levels2, inplace=True)
assert_almost_equal(mi1.values, vals2)
# make sure label setting works too
labels2 = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]
exp_values = np.array([(long(1), 'a')] * 6, dtype=object)
new_values = mi2.set_labels(labels2).values
# not inplace shouldn't change
assert_almost_equal(mi2._tuples, vals2)
# should have correct values
assert_almost_equal(exp_values, new_values)
# and again setting inplace should kill _tuples, etc
mi2.set_labels(labels2, inplace=True)
assert_almost_equal(mi2.values, new_values)
def test_copy_in_constructor(self):
levels = np.array(["a", "b", "c"])
labels = np.array([1, 1, 2, 0, 0, 1, 1])
val = labels[0]
mi = MultiIndex(levels=[levels, levels], labels=[labels, labels],
copy=True)
self.assertEqual(mi.labels[0][0], val)
labels[0] = 15
self.assertEqual(mi.labels[0][0], val)
val = levels[0]
levels[0] = "PANDA"
self.assertEqual(mi.levels[0][0], val)
def test_set_value_keeps_names(self):
# motivating example from #3742
lev1 = ['hans', 'hans', 'hans', 'grethe', 'grethe', 'grethe']
lev2 = ['1', '2', '3'] * 2
idx = pd.MultiIndex.from_arrays(
[lev1, lev2],
names=['Name', 'Number'])
df = pd.DataFrame(
np.random.randn(6, 4),
columns=['one', 'two', 'three', 'four'],
index=idx)
df = df.sortlevel()
self.assertIsNone(df.is_copy)
self.assertEqual(df.index.names, ('Name', 'Number'))
df = df.set_value(('grethe', '4'), 'one', 99.34)
self.assertIsNone(df.is_copy)
self.assertEqual(df.index.names, ('Name', 'Number'))
def test_names(self):
# names are assigned in __init__
names = self.index_names
level_names = [level.name for level in self.index.levels]
self.assertEqual(names, level_names)
# setting bad names on existing
index = self.index
assertRaisesRegexp(ValueError, "^Length of names", setattr, index,
"names", list(index.names) + ["third"])
assertRaisesRegexp(ValueError, "^Length of names", setattr, index,
"names", [])
# initializing with bad names (should always be equivalent)
major_axis, minor_axis = self.index.levels
major_labels, minor_labels = self.index.labels
assertRaisesRegexp(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first'])
assertRaisesRegexp(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first', 'second', 'third'])
# names are assigned
index.names = ["a", "b"]
ind_names = list(index.names)
level_names = [level.name for level in index.levels]
self.assertEqual(ind_names, level_names)
def test_reference_duplicate_name(self):
idx = MultiIndex.from_tuples([('a', 'b'), ('c', 'd')], names=['x', 'x'])
self.assertTrue(idx._reference_duplicate_name('x'))
idx = MultiIndex.from_tuples([('a', 'b'), ('c', 'd')], names=['x', 'y'])
self.assertFalse(idx._reference_duplicate_name('x'))
def test_astype(self):
expected = self.index.copy()
actual = self.index.astype('O')
assert_copy(actual.levels, expected.levels)
assert_copy(actual.labels, expected.labels)
self.check_level_names(actual, expected.names)
with assertRaisesRegexp(TypeError, "^Setting.*dtype.*object"):
self.index.astype(np.dtype(int))
def test_constructor_single_level(self):
single_level = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]],
names=['first'])
tm.assert_isinstance(single_level, Index)
self.assertNotIsInstance(single_level, MultiIndex)
self.assertEqual(single_level.name, 'first')
single_level = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]])
self.assertIsNone(single_level.name)
def test_constructor_no_levels(self):
assertRaisesRegexp(ValueError, "non-zero number of levels/labels",
MultiIndex, levels=[], labels=[])
both_re = re.compile('Must pass both levels and labels')
with tm.assertRaisesRegexp(TypeError, both_re):
MultiIndex(levels=[])
with tm.assertRaisesRegexp(TypeError, both_re):
MultiIndex(labels=[])
def test_constructor_mismatched_label_levels(self):
labels = [np.array([1]), np.array([2]), np.array([3])]
levels = ["a"]
assertRaisesRegexp(ValueError, "Length of levels and labels must be"
" the same", MultiIndex, levels=levels,
labels=labels)
length_error = re.compile('>= length of level')
label_error = re.compile(r'Unequal label lengths: \[4, 2\]')
# important to check that it's looking at the right thing.
with tm.assertRaisesRegexp(ValueError, length_error):
MultiIndex(levels=[['a'], ['b']], labels=[[0, 1, 2, 3], [0, 3, 4, 1]])
with tm.assertRaisesRegexp(ValueError, label_error):
MultiIndex(levels=[['a'], ['b']], labels=[[0, 0, 0, 0], [0, 0]])
# external API
with tm.assertRaisesRegexp(ValueError, length_error):
self.index.copy().set_levels([['a'], ['b']])
with tm.assertRaisesRegexp(ValueError, label_error):
self.index.copy().set_labels([[0, 0, 0, 0], [0, 0]])
# deprecated properties
with warnings.catch_warnings():
warnings.simplefilter('ignore')
with tm.assertRaisesRegexp(ValueError, length_error):
self.index.copy().levels = [['a'], ['b']]
with tm.assertRaisesRegexp(ValueError, label_error):
self.index.copy().labels = [[0, 0, 0, 0], [0, 0]]
def assert_multiindex_copied(self, copy, original):
        # levels should be (at least) shallow-copied
        assert_copy(copy.levels, original.levels)
        assert_almost_equal(copy.labels, original.labels)
        # labels are equal regardless of which way they were copied
        assert_almost_equal(copy.labels, original.labels)
        self.assertIsNot(copy.labels, original.labels)
        # names are equal regardless of which way they were copied
self.assertEqual(copy.names, original.names)
self.assertIsNot(copy.names, original.names)
# sort order should be copied
self.assertEqual(copy.sortorder, original.sortorder)
def test_copy(self):
i_copy = self.index.copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_shallow_copy(self):
i_copy = self.index._shallow_copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_view(self):
i_view = self.index.view()
self.assert_multiindex_copied(i_view, self.index)
def check_level_names(self, index, names):
self.assertEqual([level.name for level in index.levels], list(names))
def test_changing_names(self):
# names should be applied to levels
level_names = [level.name for level in self.index.levels]
self.check_level_names(self.index, self.index.names)
view = self.index.view()
copy = self.index.copy()
shallow_copy = self.index._shallow_copy()
# changing names should change level names on object
new_names = [name + "a" for name in self.index.names]
self.index.names = new_names
self.check_level_names(self.index, new_names)
# but not on copies
self.check_level_names(view, level_names)
self.check_level_names(copy, level_names)
self.check_level_names(shallow_copy, level_names)
# and copies shouldn't change original
shallow_copy.names = [name + "c" for name in shallow_copy.names]
self.check_level_names(self.index, new_names)
def test_duplicate_names(self):
self.index.names = ['foo', 'foo']
assertRaisesRegexp(KeyError, 'Level foo not found',
self.index._get_level_number, 'foo')
def test_get_level_number_integer(self):
self.index.names = [1, 0]
self.assertEqual(self.index._get_level_number(1), 0)
self.assertEqual(self.index._get_level_number(0), 1)
self.assertRaises(IndexError, self.index._get_level_number, 2)
assertRaisesRegexp(KeyError, 'Level fourth not found',
self.index._get_level_number, 'fourth')
def test_from_arrays(self):
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
result = MultiIndex.from_arrays(arrays)
self.assertEqual(list(result), list(self.index))
# infer correctly
result = MultiIndex.from_arrays([[pd.NaT, Timestamp('20130101')], ['a', 'b']])
self.assertTrue(result.levels[0].equals(Index([Timestamp('20130101')])))
self.assertTrue(result.levels[1].equals(Index(['a','b'])))
def test_from_product(self):
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
result = MultiIndex.from_product([first, second], names=names)
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'),
('bar', 'a'), ('bar', 'b'), ('bar', 'c'),
('buz', 'a'), ('buz', 'b'), ('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
assert_array_equal(result, expected)
self.assertEqual(result.names, names)
def test_from_product_datetimeindex(self):
dt_index = date_range('2000-01-01', periods=2)
mi = pd.MultiIndex.from_product([[1, 2], dt_index])
etalon = pd.lib.list_to_object_array([(1, pd.Timestamp('2000-01-01')),
(1, pd.Timestamp('2000-01-02')),
(2, pd.Timestamp('2000-01-01')),
(2, pd.Timestamp('2000-01-02'))])
assert_array_equal(mi.values, etalon)
def test_values_boxed(self):
tuples = [(1, pd.Timestamp('2000-01-01')),
(2, pd.NaT),
(3, pd.Timestamp('2000-01-03')),
(1, pd.Timestamp('2000-01-04')),
(2, pd.Timestamp('2000-01-02')),
(3, pd.Timestamp('2000-01-03'))]
mi = pd.MultiIndex.from_tuples(tuples)
assert_array_equal(mi.values, pd.lib.list_to_object_array(tuples))
# Check that code branches for boxed values produce identical results
assert_array_equal(mi.values[:4], mi[:4].values)
def test_append(self):
result = self.index[:3].append(self.index[3:])
self.assertTrue(result.equals(self.index))
foos = [self.index[:1], self.index[1:3], self.index[3:]]
result = foos[0].append(foos[1:])
self.assertTrue(result.equals(self.index))
# empty
result = self.index.append([])
self.assertTrue(result.equals(self.index))
def test_get_level_values(self):
result = self.index.get_level_values(0)
expected = ['foo', 'foo', 'bar', 'baz', 'qux', 'qux']
self.assert_numpy_array_equal(result, expected)
self.assertEqual(result.name, 'first')
result = self.index.get_level_values('first')
expected = self.index.get_level_values(0)
self.assert_numpy_array_equal(result, expected)
def test_get_level_values_na(self):
arrays = [['a', 'b', 'b'], [1, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
values = index.get_level_values(1)
expected = [1, np.nan, 2]
assert_array_equal(values.values.astype(float), expected)
arrays = [['a', 'b', 'b'], [np.nan, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
values = index.get_level_values(1)
expected = [np.nan, np.nan, 2]
assert_array_equal(values.values.astype(float), expected)
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
values = index.get_level_values(0)
expected = [np.nan, np.nan, np.nan]
assert_array_equal(values.values.astype(float), expected)
values = index.get_level_values(1)
expected = np.array(['a', np.nan, 1],dtype=object)
assert_array_equal(values.values, expected)
arrays = [['a', 'b', 'b'], pd.DatetimeIndex([0, 1, pd.NaT])]
index = pd.MultiIndex.from_arrays(arrays)
values = index.get_level_values(1)
expected = pd.DatetimeIndex([0, 1, pd.NaT])
assert_array_equal(values.values, expected.values)
arrays = [[], []]
index = pd.MultiIndex.from_arrays(arrays)
values = index.get_level_values(0)
self.assertEqual(values.shape, (0,))
def test_reorder_levels(self):
# this blows up
assertRaisesRegexp(IndexError, '^Too many levels',
self.index.reorder_levels, [2, 1, 0])
def test_nlevels(self):
self.assertEqual(self.index.nlevels, 2)
def test_iter(self):
result = list(self.index)
expected = [('foo', 'one'), ('foo', 'two'), ('bar', 'one'),
('baz', 'two'), ('qux', 'one'), ('qux', 'two')]
self.assertEqual(result, expected)
def test_legacy_pickle(self):
if compat.PY3:
            raise nose.SkipTest("testing for legacy pickles not supported on py3")
path = tm.get_data_path('multiindex_v1.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
self.assertTrue(obj.equals(obj2))
res = obj.get_indexer(obj)
exp = np.arange(len(obj))
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_legacy_v2_unpickle(self):
        # 0.7.3 -> 0.8.0 format migration
path = tm.get_data_path('mindex_073.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
self.assertTrue(obj.equals(obj2))
res = obj.get_indexer(obj)
exp = np.arange(len(obj))
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
index=MultiIndex.from_product([[1,2],['a','b'],date_range('20130101',periods=3,tz='US/Eastern')],names=['one','two','three'])
unpickled = self.round_trip_pickle(index)
self.assertTrue(index.equal_levels(unpickled))
def test_from_tuples_index_values(self):
result = MultiIndex.from_tuples(self.index)
self.assertTrue((result.values == self.index.values).all())
def test_contains(self):
self.assertIn(('foo', 'two'), self.index)
self.assertNotIn(('bar', 'two'), self.index)
self.assertNotIn(None, self.index)
def test_is_all_dates(self):
self.assertFalse(self.index.is_all_dates)
def test_is_numeric(self):
# MultiIndex is never numeric
self.assertFalse(self.index.is_numeric())
def test_getitem(self):
# scalar
self.assertEqual(self.index[2], ('bar', 'one'))
# slice
result = self.index[2:5]
expected = self.index[[2, 3, 4]]
self.assertTrue(result.equals(expected))
# boolean
result = self.index[[True, False, True, False, True, True]]
result2 = self.index[np.array([True, False, True, False, True, True])]
expected = self.index[[0, 2, 4, 5]]
self.assertTrue(result.equals(expected))
self.assertTrue(result2.equals(expected))
def test_getitem_group_select(self):
sorted_idx, _ = self.index.sortlevel(0)
self.assertEqual(sorted_idx.get_loc('baz'), slice(3, 4))
self.assertEqual(sorted_idx.get_loc('foo'), slice(0, 2))
def test_get_loc(self):
self.assertEqual(self.index.get_loc(('foo', 'two')), 1)
self.assertEqual(self.index.get_loc(('baz', 'two')), 3)
self.assertRaises(KeyError, self.index.get_loc, ('bar', 'two'))
self.assertRaises(KeyError, self.index.get_loc, 'quux')
# 3 levels
index = MultiIndex(levels=[Index(lrange(4)),
Index(lrange(4)),
Index(lrange(4))],
labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]),
np.array([0, 1, 0, 0, 0, 1, 0, 1]),
np.array([1, 0, 1, 1, 0, 0, 1, 0])])
self.assertRaises(KeyError, index.get_loc, (1, 1))
self.assertEqual(index.get_loc((2, 0)), slice(3, 5))
def test_get_loc_duplicates(self):
index = Index([2, 2, 2, 2])
result = index.get_loc(2)
expected = slice(0, 4)
self.assertEqual(result, expected)
# self.assertRaises(Exception, index.get_loc, 2)
index = Index(['c', 'a', 'a', 'b', 'b'])
rs = index.get_loc('c')
xp = 0
assert(rs == xp)
def test_get_loc_level(self):
index = MultiIndex(levels=[Index(lrange(4)),
Index(lrange(4)),
Index(lrange(4))],
labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]),
np.array([0, 1, 0, 0, 0, 1, 0, 1]),
np.array([1, 0, 1, 1, 0, 0, 1, 0])])
loc, new_index = index.get_loc_level((0, 1))
expected = slice(1, 2)
exp_index = index[expected].droplevel(0).droplevel(0)
self.assertEqual(loc, expected)
self.assertTrue(new_index.equals(exp_index))
loc, new_index = index.get_loc_level((0, 1, 0))
expected = 1
self.assertEqual(loc, expected)
self.assertIsNone(new_index)
self.assertRaises(KeyError, index.get_loc_level, (2, 2))
index = MultiIndex(levels=[[2000], lrange(4)],
labels=[np.array([0, 0, 0, 0]),
np.array([0, 1, 2, 3])])
result, new_index = index.get_loc_level((2000, slice(None, None)))
expected = slice(None, None)
self.assertEqual(result, expected)
self.assertTrue(new_index.equals(index.droplevel(0)))
def test_slice_locs(self):
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
slob = slice(*idx.slice_locs(df.index[5], df.index[15]))
sliced = stacked[slob]
expected = df[5:16].stack()
tm.assert_almost_equal(sliced.values, expected.values)
slob = slice(*idx.slice_locs(df.index[5] + timedelta(seconds=30),
df.index[15] - timedelta(seconds=30)))
sliced = stacked[slob]
expected = df[6:15].stack()
tm.assert_almost_equal(sliced.values, expected.values)
def test_slice_locs_with_type_mismatch(self):
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
assertRaisesRegexp(TypeError, '^Level type mismatch', idx.slice_locs,
(1, 3))
assertRaisesRegexp(TypeError, '^Level type mismatch', idx.slice_locs,
df.index[5] + timedelta(seconds=30), (5, 2))
df = tm.makeCustomDataframe(5, 5)
stacked = df.stack()
idx = stacked.index
with assertRaisesRegexp(TypeError, '^Level type mismatch'):
idx.slice_locs(timedelta(seconds=30))
# TODO: Try creating a UnicodeDecodeError in exception message
with assertRaisesRegexp(TypeError, '^Level type mismatch'):
idx.slice_locs(df.index[1], (16, "a"))
def test_slice_locs_not_sorted(self):
index = MultiIndex(levels=[Index(lrange(4)),
Index(lrange(4)),
Index(lrange(4))],
labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]),
np.array([0, 1, 0, 0, 0, 1, 0, 1]),
np.array([1, 0, 1, 1, 0, 0, 1, 0])])
assertRaisesRegexp(KeyError, "[Kk]ey length.*greater than MultiIndex"
" lexsort depth", index.slice_locs, (1, 0, 1),
(2, 1, 0))
# works
sorted_index, _ = index.sortlevel(0)
# should there be a test case here???
sorted_index.slice_locs((1, 0, 1), (2, 1, 0))
def test_slice_locs_partial(self):
sorted_idx, _ = self.index.sortlevel(0)
result = sorted_idx.slice_locs(('foo', 'two'), ('qux', 'one'))
self.assertEqual(result, (1, 5))
result = sorted_idx.slice_locs(None, ('qux', 'one'))
self.assertEqual(result, (0, 5))
result = sorted_idx.slice_locs(('foo', 'two'), None)
self.assertEqual(result, (1, len(sorted_idx)))
result = sorted_idx.slice_locs('bar', 'baz')
self.assertEqual(result, (2, 4))
def test_slice_locs_not_contained(self):
# some searchsorted action
index = MultiIndex(levels=[[0, 2, 4, 6], [0, 2, 4]],
labels=[[0, 0, 0, 1, 1, 2, 3, 3, 3],
[0, 1, 2, 1, 2, 2, 0, 1, 2]],
sortorder=0)
result = index.slice_locs((1, 0), (5, 2))
self.assertEqual(result, (3, 6))
result = index.slice_locs(1, 5)
self.assertEqual(result, (3, 6))
result = index.slice_locs((2, 2), (5, 2))
self.assertEqual(result, (3, 6))
result = index.slice_locs(2, 5)
self.assertEqual(result, (3, 6))
result = index.slice_locs((1, 0), (6, 3))
self.assertEqual(result, (3, 8))
result = index.slice_locs(-1, 10)
self.assertEqual(result, (0, len(index)))
def test_consistency(self):
# need to construct an overflow
major_axis = lrange(70000)
minor_axis = lrange(10)
major_labels = np.arange(70000)
minor_labels = np.repeat(lrange(10), 7000)
        # the fact that it works means it's consistent
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
# inconsistent
major_labels = np.array([0, 0, 1, 1, 1, 2, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
self.assertFalse(index.is_unique)
def test_truncate(self):
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
result = index.truncate(before=1)
self.assertNotIn('foo', result.levels[0])
self.assertIn(1, result.levels[0])
result = index.truncate(after=1)
self.assertNotIn(2, result.levels[0])
self.assertIn(1, result.levels[0])
result = index.truncate(before=1, after=2)
self.assertEqual(len(result.levels[0]), 2)
# after < before
self.assertRaises(ValueError, index.truncate, 3, 1)
def test_get_indexer(self):
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
idx1 = index[:5]
idx2 = index[[1, 3, 5]]
r1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, [1, 3, -1])
r1 = idx2.get_indexer(idx1, method='pad')
        assert_almost_equal(r1, [-1, 0, 0, 1, 1])
# AUTOGENERATED! DO NOT EDIT! File to edit: notebooks/04_Create_Acs_Indicators_Original.ipynb (unless otherwise specified).
__all__ = ['racdiv', 'pasi', 'elheat', 'empl', 'fam', 'female', 'femhhs', 'heatgas', 'hh40inc', 'hh60inc', 'hh75inc',
'hhchpov', 'hhm75', 'hhpov', 'hhs', 'hsdipl', 'lesshs', 'male', 'nilf', 'othrcom', 'p2more', 'pubtran',
'age5', 'age24', 'age64', 'age18', 'age65', 'affordm', 'affordr', 'bahigher', 'carpool', 'drvalone',
'hh25inc', 'mhhi', 'nohhint', 'novhcl', 'paa', 'ppac', 'phisp', 'pwhite', 'sclemp', 'tpop', 'trav14',
'trav29', 'trav45', 'trav44', 'unempl', 'unempr', 'walked']
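# Overview (descriptive note): each indicator function below follows the same sketch --
# glob a cleaned, CSA-labelled ACS tract file from the local AcsDataClean/ folder,
# aggregate tracts by CSA, and return a pandas Series of indicator values indexed by CSA.
# A hypothetical call (assuming the matching AcsDataClean/*_5y17_est.csv files exist
# locally) would look like:
#
#   diversity = racdiv('17')   # one diversity-index value per CSA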
# Cell
#File: racdiv.py
#Author: <NAME>
#Date: 4/16/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B02001 - Race
# Universe: Total Population
# Uses ACS Table B03002 - HISPANIC OR LATINO ORIGIN BY RACE
# Universe: Total Population
# Table Creates: racdiv, paa, pwhite, pasi, phisp, p2more, ppac
#purpose:
#input: Year
#output:
import pandas as pd
import glob
def racdiv( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
    # Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B02001*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
fileName = ''
for name in glob.glob('AcsDataClean/B03002*5y'+str(year)+'_est.csv'):
fileName = name
df_hisp = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
    # Group By CSA so that they may be operated on
df = df.groupby('CSA')
df_hisp = df_hisp.groupby('CSA')
# Aggregate Numeric Values by Sum
df = df.sum(numeric_only=True)
df_hisp = df_hisp.sum(numeric_only=True)
# Append the one column from the other ACS Table
df['B03002_012E_Total_Hispanic_or_Latino'] = df_hisp['B03002_012E_Total_Hispanic_or_Latino']
df1 = pd.DataFrame()
df1['CSA'] = df.index
df1.set_index('CSA', drop = True, inplace = True)
df1['African-American%'] = df[ 'B02001_003E_Total_Black_or_African_American_alone' ] / df[ 'B02001_001E_Total' ] * 100
df1['White%'] = df[ 'B02001_002E_Total_White_alone' ] / df[ 'B02001_001E_Total' ] * 100
df1['American Indian%'] = df[ 'B02001_004E_Total_American_Indian_and_Alaska_Native_alone' ]/ df[ 'B02001_001E_Total' ] * 100
df1['Asian%'] = df[ 'B02001_005E_Total_Asian_alone' ] / df[ 'B02001_001E_Total' ] * 100
df1['Native Hawaii/Pac Islander%'] = df[ 'B02001_006E_Total_Native_Hawaiian_and_Other_Pacific_Islander_alone'] / df[ 'B02001_001E_Total' ] * 100
df1['Hisp %'] = df['B03002_012E_Total_Hispanic_or_Latino'] / df[ 'B02001_001E_Total' ] * 100
    # =1-(POWER(%AA/100,2)+POWER(%White/100,2)+POWER(%AmerInd/100,2)+POWER(%Asian/100,2) + POWER(%NativeAm/100,2))*(POWER(%Hispanic/100,2) + POWER(1-(%Hispanic/100),2))
df1['Diversity_index'] = ( 1- (
( df1['African-American%'] /100 )**2
+( df1['White%'] /100 )**2
+( df1['American Indian%'] /100 )**2
+( df1['Asian%'] /100 )**2
+( df1['Native Hawaii/Pac Islander%'] /100 )**2
)*(
( df1['Hisp %'] /100 )**2
+(1-( df1['Hisp %'] /100) )**2
) ) * 100
return df1['Diversity_index']
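# Worked example of the diversity-index arithmetic above (illustrative shares, not real
# data): with 60% African-American, 30% White, 1% American Indian, 5% Asian and 1%
# Native Hawaiian/Pacific Islander, the squared-share sum is
# 0.36 + 0.09 + 0.0001 + 0.0025 + 0.0001 = 0.4527; with 10% Hispanic the ethnicity term
# is 0.10**2 + 0.90**2 = 0.82, giving (1 - 0.4527 * 0.82) * 100 ≈ 62.9.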
# Cell
#File: pasi.py
#Author: <NAME>
#Date: 4/16/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B03002 - HISPANIC OR LATINO ORIGIN BY RACE
# Universe: Total Population
# Table Creates: racdiv, paa, pwhite, pasi, phisp, p2more, ppac
#purpose:
#input: Year
#output:
import pandas as pd
import glob
def pasi( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
    # Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B03002*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
    # Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = df.sum(numeric_only=True)
    # (No second ACS table is needed here; B03002 already contains the Hispanic-or-Latino columns.)
df1 = pd.DataFrame()
df1['CSA'] = df.index
df1.set_index('CSA', drop = True, inplace = True)
tot = df[ 'B03002_001E_Total' ]
df1['Asian%NH'] = df[ 'B03002_006E_Total_Not_Hispanic_or_Latino_Asian_alone' ]/ tot * 100
return df1['Asian%NH']
# Cell
#File: elheat.py
#Author: <NAME>
#Date: 1/17/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B25040 - HOUSE HEATING FUEL
# Universe - Occupied housing units
# Table Creates: elheat, heatgas
#purpose: Produce Sustainability - Percent of Residences Heated by Electricity Indicator
#input: Year
#output:
import pandas as pd
import glob
def elheat( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
    # Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B25040*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
    # Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = sumInts(df)
# Add 'BALTIMORE' which is the SUM of all the CSAs
#~~~~~~~~~~~~~~~
# Step 2)
# Prepare the columns
#~~~~~~~~~~~~~~~
# Final Dataframe
fi = pd.DataFrame()
columns = ['B25040_004E','B25040_001E']
for col in columns:
fi = addKey(df, fi, col)
# Numerators
numerators = pd.DataFrame()
columns = ['B25040_004E']
for col in columns:
numerators = addKey(df, numerators, col)
# Denominators
denominators = pd.DataFrame()
columns = ['B25040_001E']
for col in columns:
denominators = addKey(df, denominators, col)
# construct the denominator, returns 0 iff the other two rows are equal.
#~~~~~~~~~~~~~~~
# Step 3)
# Run the Calculation + final mods
# ( value[1] / nullif(value[2],0) )*100
#~~~~~~~~~~~~~~~
fi['numerator'] = numerators.sum(axis=1)
fi['denominator'] = denominators.sum(axis=1)
fi = fi[fi['denominator'] != 0] # Delete Rows where the 'denominator' column is 0
fi['final'] = (fi['numerator'] / fi['denominator'] ) * 100
return fi['final']
"""
/* <elheat_14> */ --
WITH tbl AS (
select csa,
( value[1] / nullif(value[2],0) )*100::numeric as result
from vital_signs.get_acs_vars_csa_and_bc('2014',ARRAY['B25040_004E','B25040_001E'])
)
update vital_signs.data
set elheat = result from tbl where data2.csa = tbl.csa and update_data_year = '2014' and data_year = '2014';
"""
# Cell
#File: empl.py
#Author: <NAME>
#Date: 1/17/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B23001 - SEX BY AGE BY EMPLOYMENT STATUS FOR THE POPULATION 16 YEARS AND OVER
# Universe - Population 16 years and over
# Table Creates: empl, unempl, unempr, nilf
#purpose: Produce Workforce and Economic Development - Percent Population 16-64 Employed Indicator
#input: Year
#output:
import pandas as pd
import glob
def empl( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
    # Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B23001*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
    # Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = sumInts(df)
# Add 'BALTIMORE' which is the SUM of all the CSAs
#~~~~~~~~~~~~~~~
# Step 2)
# Prepare the columns
#~~~~~~~~~~~~~~~
# Final Dataframe
fi = pd.DataFrame()
columns = ['B23001_003E', 'B23001_010E', 'B23001_017E', 'B23001_024E', 'B23001_031E', 'B23001_038E', 'B23001_045E', 'B23001_052E', 'B23001_059E', 'B23001_066E', 'B23001_089E', 'B23001_096E', 'B23001_103E', 'B23001_110E', 'B23001_117E', 'B23001_124E', 'B23001_131E', 'B23001_138E', 'B23001_145E', 'B23001_152E', 'B23001_007E', 'B23001_014E', 'B23001_021E', 'B23001_028E', 'B23001_035E', 'B23001_042E', 'B23001_049E', 'B23001_056E', 'B23001_063E', 'B23001_070E', 'B23001_093E', 'B23001_100E', 'B23001_107E', 'B23001_114E', 'B23001_121E', 'B23001_128E', 'B23001_135E', 'B23001_142E', 'B23001_149E', 'B23001_156E']
for col in columns:
fi = addKey(df, fi, col)
# Numerators
numerators = pd.DataFrame()
columns = ['B23001_007E', 'B23001_014E', 'B23001_021E', 'B23001_028E', 'B23001_035E', 'B23001_042E', 'B23001_049E', 'B23001_056E', 'B23001_063E', 'B23001_070E', 'B23001_093E', 'B23001_100E', 'B23001_107E', 'B23001_114E', 'B23001_121E', 'B23001_128E', 'B23001_135E', 'B23001_142E', 'B23001_149E', 'B23001_156E']
for col in columns:
numerators = addKey(df, numerators, col)
# Denominators
denominators = pd.DataFrame()
columns = ['B23001_003E', 'B23001_010E', 'B23001_017E', 'B23001_024E', 'B23001_031E', 'B23001_038E', 'B23001_045E', 'B23001_052E', 'B23001_059E', 'B23001_066E', 'B23001_089E', 'B23001_096E', 'B23001_103E', 'B23001_110E', 'B23001_117E', 'B23001_124E', 'B23001_131E', 'B23001_138E', 'B23001_145E', 'B23001_152E']
for col in columns:
denominators = addKey(df, denominators, col)
# construct the denominator, returns 0 iff the other two rows are equal.
#~~~~~~~~~~~~~~~
# Step 3)
# Run the Calculation
# (value[21]+value[22]+value[23]+value[24]+value[25]+value[26]+value[27]+value[28]+value[29]+value[30]+value[31]+value[32]+value[33]+value[34]+value[35]+value[36]+value[37]+value[38]+value[39]+value[40]) --civil labor force empl 16-64
#/
#nullif( (value[1]+value[2]+value[3]+value[4]+value[5]+value[6]+value[7]+value[8]+value[9]+value[10]+value[11]+value[12]+value[13]+value[14]+value[15]+value[16]+value[17]+value[18]+value[19]+value[20]) -- population 16 to 64 ,0) )*100
#~~~~~~~~~~~~~~~
fi['numerator'] = numerators.sum(axis=1)
fi['denominator'] = denominators.sum(axis=1)
fi = fi[fi['denominator'] != 0] # Delete Rows where the 'denominator' column is 0
fi['final'] = (fi['numerator'] / fi['denominator'] ) * 100
return fi['final']
"""
/* <empl_14> */ --
WITH tbl AS (
select csa,
( ( value[21]+value[22]+value[23]+value[24]+value[25]+value[26]+value[27]+value[28]+value[29]+value[30]+value[31]+value[32]+value[33]+value[34]+value[35]+value[36]+value[37]+value[38]+value[39]+value[40]) --civil labor force empl 16-64 / nullif( (value[1]+value[2]+value[3]+value[4]+value[5]+value[6]+value[7]+value[8]+value[9]+value[10]+value[11]+value[12]+value[13]+value[14]+value[15]+value[16]+value[17]+value[18]+value[19]+value[20]) -- population 16 to 64 ,0) )*100::numeric
as result
from vital_signs.get_acs_vars_csa_and_bc('2014',ARRAY[ 'B23001_003E','B23001_010E','B23001_017E','B23001_024E','B23001_031E','B23001_038E','B23001_045E','B23001_052E','B23001_059E','B23001_066E','B23001_089E','B23001_096E','B23001_103E','B23001_110E','B23001_117E','B23001_124E','B23001_131E','B23001_138E','B23001_145E','B23001_152E','B23001_007E','B23001_014E','B23001_021E','B23001_028E','B23001_035E','B23001_042E','B23001_049E','B23001_056E','B23001_063E','B23001_070E','B23001_093E','B23001_100E','B23001_107E','B23001_114E','B23001_121E','B23001_128E','B23001_135E','B23001_142E','B23001_149E','B23001_156E'])
)
update vital_signs.data
set empl = result from tbl where data2.csa = tbl.csa and update_data_year = '2014' and data_year = '2014';
"""
# Cell
#File: fam.py
#Author: <NAME>
#Date: 4/16/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B11005 - HOUSEHOLDS BY PRESENCE OF PEOPLE UNDER 18 YEARS BY HOUSEHOLD TYPE
# Universe: Households
# Table Creates: hhs, fam, femhhs
#purpose:
#input: Year
#output:
import pandas as pd
import glob
def fam( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
    # Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B11005*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
    # Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = df.sum(numeric_only=True)
df1 = pd.DataFrame()
df1['CSA'] = df.index
df1.set_index('CSA', drop = True, inplace = True)
# DIFFERENCES IN TABLE NAMES EXIST BETWEEN 16 and 17. 17 has no comma.
rootStr = 'B11005_007E_Total_Households_with_one_or_more_people_under_18_years_Family_households_Other_family_Female_householder'
str16 = rootStr + ',_no_husband_present'
str17 = rootStr + '_no_husband_present'
# Aggregate by CSA
    # Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = df.sum(numeric_only=True)
# Delete Unassigned--Jail
df = df[df.index != 'Unassigned--Jail']
# Move Baltimore to Bottom
bc = df.loc[ 'Baltimore City' ]
df = df.drop( df.index[1] )
df.loc[ 'Baltimore City' ] = bc
df1 = pd.DataFrame()
df1['CSA'] = df.index
df1.set_index('CSA', drop = True, inplace = True)
# Actually produce the data
df1['total'] = df[ 'B11005_001E_Total' ]
df1['18Under'] = df[ 'B11005_002E_Total_Households_with_one_or_more_people_under_18_years' ] / df1['total'] * 100
return df1['18Under']
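# Descriptive note: as written, fam() returns the percent of households with one or more
# people under 18 (the '18Under' column); the female-householder share derived from the
# same B11005 table is what femhhs() below returns.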
# Cell
#File: female.py
#Author: <NAME>
#Date: 4/16/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B01001 - SEX BY AGE
# Universe: Total population
# Table Creates: tpop, female, male, age5 age18 age24 age64 age65
#purpose:
#input: Year
#output:
import pandas as pd
import glob
def female( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
    # Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B01001*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
    # Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = df.sum(numeric_only=True)
# df.columns
total = df['B01001_001E_Total']
df1 = pd.DataFrame()
df1['CSA'] = df.index
df1.set_index('CSA', drop = True, inplace = True)
df1['onlyTheLadies'] = df[ 'B01001_026E_Total_Female' ]
return df1['onlyTheLadies']
# Cell
#File: femhhs.py
#Author: <NAME>
#Date: 4/16/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B11005 - HOUSEHOLDS BY PRESENCE OF PEOPLE UNDER 18 YEARS BY HOUSEHOLD TYPE
# Universe: Households
# Table Creates: male, hhs, fam, femhhs
#purpose:
#input: Year
#output:
import pandas as pd
import glob
def femhhs( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
    # Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B11005*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
    # Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = df.sum(numeric_only=True)
df1 = pd.DataFrame()
df1['CSA'] = df.index
df1.set_index('CSA', drop = True, inplace = True)
# DIFFERENCES IN TABLE NAMES EXIST BETWEEN 16 and 17. 17 has no comma.
rootStr = 'B11005_007E_Total_Households_with_one_or_more_people_under_18_years_Family_households_Other_family_Female_householder'
str16 = rootStr + ',_no_husband_present'
str17 = rootStr + '_no_husband_present'
str19 = rootStr + ',_no_spouse_present'
femhh = str17 if year == '17' else str19 if year == '19' else str16
# Actually produce the data
df1['total'] = df[ 'B11005_001E_Total' ]
df1['18Under'] = df[ 'B11005_002E_Total_Households_with_one_or_more_people_under_18_years' ] / df1['total'] * 100
df1['FemaleHH'] = df[ femhh ] / df['B11005_002E_Total_Households_with_one_or_more_people_under_18_years'] * 100
df1['FamHHChildrenUnder18'] = df['B11005_003E_Total_Households_with_one_or_more_people_under_18_years_Family_households']
df1['FamHHChildrenOver18'] = df['B11005_012E_Total_Households_with_no_people_under_18_years_Family_households']
df1['FamHH'] = df1['FamHHChildrenOver18'] + df1['FamHHChildrenUnder18']
return df1['FemaleHH']
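# Hypothetical usage sketch (not part of the original notebook): each indicator
# function in these cells takes a two-digit year string and returns a pandas Series
# indexed by CSA, assuming the matching AcsDataClean/*_est.csv file is present.
if __name__ == '__main__':
    femhh_17 = femhhs('17')  # percent of households with children headed by a female
    print(femhh_17.sort_values(ascending=False).head())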
# Cell
#File: heatgas.py
#Author: <NAME>
#Date: 1/17/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B25040 - HOUSE HEATING FUEL
# Universe - Occupied housing units
# Table Creates: elheat, heatgas
#purpose: Produce Sustainability - Percent of Residences Heated by Gas Indicator
#input: Year
#output:
import pandas as pd
import glob
def heatgas( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B25040*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = sumInts(df)
# Add 'BALTIMORE' which is the SUM of all the CSAs
#~~~~~~~~~~~~~~~
# Step 2)
# Prepare the columns
#~~~~~~~~~~~~~~~
# Final Dataframe
fi = pd.DataFrame()
columns = ['B25040_002E','B25040_001E']
for col in columns:
fi = addKey(df, fi, col)
# Numerators
numerators = pd.DataFrame()
columns = ['B25040_002E']
for col in columns:
numerators = addKey(df, numerators, col)
# Denominators
denominators = pd.DataFrame()
columns = ['B25040_001E']
for col in columns:
denominators = addKey(df, denominators, col)
# construct the denominator, returns 0 iff the other two rows are equal.
#~~~~~~~~~~~~~~~
# Step 3)
# Run the Calculation
# ( value[1] / nullif(value[2],0) )*100
#~~~~~~~~~~~~~~~
fi['numerator'] = numerators.sum(axis=1)
fi['denominator'] = denominators.sum(axis=1)
fi = fi[fi['denominator'] != 0] # Delete Rows where the 'denominator' column is 0
fi['final'] = (fi['numerator'] / fi['denominator'] ) * 100
return fi['final']
"""
/* <heatgas_14> */ --
WITH tbl AS (
select csa,
( value[1] / nullif(value[2],0) )*100::numeric as result
from vital_signs.get_acs_vars_csa_and_bc('2014',ARRAY['B25040_002E','B25040_001E'])
)
update vital_signs.data
set heatgas = result from tbl where data2.csa = tbl.csa and update_data_year = '2014' and data_year = '2014';
"""
# Cell
#File: hh40inc.py
#Author: <NAME>
#Date: 1/17/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B19001 - HOUSEHOLD INCOME V
# HOUSEHOLD INCOME IN THE PAST 12 MONTHS (IN 2017 INFLATION-ADJUSTED DOLLARS)
# Table Creates: hh25 hh40 hh60 hh75 hhm75, mhhi
#purpose: Produce Household Income 25K-40K Indicator
#input: Year
#output:
import pandas as pd
import glob
def hh40inc( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B19001*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = sumInts(df)
# Add 'BALTIMORE' which is the SUM of all the CSAs
#~~~~~~~~~~~~~~~
# Step 2)
# Prepare the columns
#~~~~~~~~~~~~~~~
# val1.__class__.__name__
#
# create a new dataframe for giggles
fi = pd.DataFrame()
# append into that dataframe col 001
key = getColName(df, '001')
val = getColByName(df, '001')
fi[key] = val
# append into that dataframe col 006
key = getColName(df, '006')
val = getColByName(df, '006')
fi[key] = val
# append into that dataframe col 007
key = getColName(df, '007')
val = getColByName(df, '007')
fi[key] = val
# append into that dataframe col 008
key = getColName(df, '008')
val = getColByName(df, '008')
fi[key] = val
# Delete Rows where the 'denominator' column is 0 -> like the Jail
fi = fi[fi[fi.columns[0]] != 0]
#~~~~~~~~~~~~~~~
# Step 3)
# Run the Calculation
#~~~~~~~~~~~~~~~
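# Percent of households earning $25K-$40K:
# ( B19001_006E + B19001_007E + B19001_008E ) / B19001_001E * 100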
return fi.apply(lambda x: ( ( x[fi.columns[1] ]+ x[fi.columns[2] ]+ x[fi.columns[3] ] ) / x[fi.columns[0]])*100, axis=1)
"""
/* hh40inc */ --
WITH tbl AS (
select csa,
( (value[1] + value[2] + value[3]) / value[4] )*100 as result
from vital_signs.get_acs_vars_csa_and_bc('2013',ARRAY['B19001_006E','B19001_007E','B19001_008E','B19001_001E'])
)
UPDATE vital_signs.data
set hh40inc = result from tbl where data.csa = tbl.csa and data_year = '2013';
"""
# Cell
#File: hh60inc.py
#Author: <NAME>
#Date: 1/17/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B19001 - HOUSEHOLD INCOME V
# HOUSEHOLD INCOME IN THE PAST 12 MONTHS (IN 2017 INFLATION-ADJUSTED DOLLARS)
# Table Creates: hh25 hh40 hh60 hh75 hhm75, mhhi
#purpose: Produce Household 45-60K Indicator
#input: Year
#output:
import pandas as pd
import glob
def hh60inc( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B19001*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = sumInts(df)
# Add 'BALTIMORE' which is the SUM of all the CSAs
#~~~~~~~~~~~~~~~
# Step 2)
# Prepare the columns
#~~~~~~~~~~~~~~~
# val1.__class__.__name__
#
# create a new dataframe for giggles
fi = pd.DataFrame()
# append into that dataframe col 001
key = getColName(df, '001')
val = getColByName(df, '001')
fi[key] = val
# append into that dataframe col 009
key = getColName(df, '009')
val = getColByName(df, '009')
fi[key] = val
# append into that dataframe col 010
key = getColName(df, '010')
val = getColByName(df, '010')
fi[key] = val
# append into that dataframe col 011
key = getColName(df, '011')
val = getColByName(df, '011')
fi[key] = val
# Delete Rows where the 'denominator' column is 0 -> like the Jail
fi = fi[fi[fi.columns[0]] != 0]
#~~~~~~~~~~~~~~~
# Step 3)
# Run the Calculation
#~~~~~~~~~~~~~~~
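# ( B19001_009E + B19001_010E + B19001_011E ) / B19001_001E * 100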
return fi.apply(lambda x: ( ( x[fi.columns[1] ]+ x[fi.columns[2] ]+ x[fi.columns[3] ] ) / x[fi.columns[0]])*100, axis=1)
"""
/* hh60inc */ --
WITH tbl AS (
select csa,
( (value[1] + value[2] + value[3]) / value[4] )*100 as result
from vital_signs.get_acs_vars_csa_and_bc('2013',ARRAY['B19001_009E','B19001_010E','B19001_011E','B19001_001E'])
)
UPDATE vital_signs.data
set hh60inc = result from tbl where data.csa = tbl.csa and data_year = '2013';
"""
# Cell
#File: hh75inc.py
#Author: <NAME>
#Date: 1/17/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B19001 - HOUSEHOLD INCOME V
# HOUSEHOLD INCOME IN THE PAST 12 MONTHS (IN 2017 INFLATION-ADJUSTED DOLLARS)
# Table Creates: hh25 hh40 hh60 hh75 hhm75, mhhi
#purpose: Produce Household Income 60-70K Indicator
#input: Year
#output:
import pandas as pd
import glob
def hh75inc( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B19001*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = sumInts(df)
# Add 'BALTIMORE' which is the SUM of all the CSAs
#~~~~~~~~~~~~~~~
# Step 2)
# Prepare the columns
#~~~~~~~~~~~~~~~
# val1.__class__.__name__
#
# create a new dataframe for giggles
fi = pd.DataFrame()
# append into that dataframe col 001
key = getColName(df, '001')
val = getColByName(df, '001')
fi[key] = val
# append into that dataframe col 012
key = getColName(df, '012')
val = getColByName(df, '012')
fi[key] = val
# Delete Rows where the 'denominator' column is 0 -> like the Jail
fi = fi[fi[fi.columns[0]] != 0]
#~~~~~~~~~~~~~~~
# Step 3)
# Run the Calculation
#~~~~~~~~~~~~~~~
# B19001_012E ($60,000-$74,999) / B19001_001E (all households) * 100
return fi.apply(lambda x: ( x[fi.columns[1] ] / x[fi.columns[0]])*100, axis=1)
"""
/* hh75inc */ --
WITH tbl AS (
select csa,
( value[1] / value[2] )*100 as result
from vital_signs.get_acs_vars_csa_and_bc('2013',ARRAY['B19001_012E','B19001_001E'])
)
UPDATE vital_signs.data
set hh75inc = result from tbl where data.csa = tbl.csa and data_year = '2013';
"""
# Cell
#File: hhchpov.py
#Author: <NAME>
#Date: 1/17/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B17001 - POVERTY STATUS IN THE PAST 12 MONTHS BY SEX BY AGE
# Universe: Population for whom poverty status is determined
#purpose: Produce Household Poverty Indicator
#input: Year
#output:
import pandas as pd
import glob
def hhchpov( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B17001*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = sumInts(df)
# Add 'BALTIMORE' which is the SUM of all the CSAs
#~~~~~~~~~~~~~~~
# Step 2)
# Prepare the columns
#~~~~~~~~~~~~~~~
# Final Dataframe
fi = pd.DataFrame()
columns = ['B17001_004E', 'B17001_005E', 'B17001_006E', 'B17001_007E', 'B17001_008E', 'B17001_009E', 'B17001_018E', 'B17001_019E', 'B17001_020E', 'B17001_021E', 'B17001_022E', 'B17001_023E', 'B17001_033E', 'B17001_034E', 'B17001_035E', 'B17001_036E', 'B17001_037E', 'B17001_038E', 'B17001_047E', 'B17001_048E', 'B17001_049E', 'B17001_050E', 'B17001_051E', 'B17001_052E']
for col in columns:
fi = addKey(df, fi, col)
# Numerators
numerators = pd.DataFrame()
columns = ['B17001_004E', 'B17001_005E', 'B17001_006E', 'B17001_007E', 'B17001_008E', 'B17001_009E', 'B17001_018E', 'B17001_019E', 'B17001_020E', 'B17001_021E', 'B17001_022E', 'B17001_023E']
for col in columns:
numerators = addKey(df, numerators, col)
# Denominators
denominators = pd.DataFrame()
columns = ['B17001_004E', 'B17001_005E', 'B17001_006E', 'B17001_007E', 'B17001_008E', 'B17001_009E', 'B17001_018E', 'B17001_019E', 'B17001_020E', 'B17001_021E', 'B17001_022E', 'B17001_023E', 'B17001_033E', 'B17001_034E', 'B17001_035E', 'B17001_036E', 'B17001_037E', 'B17001_038E', 'B17001_047E', 'B17001_048E', 'B17001_049E', 'B17001_050E', 'B17001_051E', 'B17001_052E']
for col in columns:
denominators = addKey(df, denominators, col)
#~~~~~~~~~~~~~~~
# Step 3)
# Run the Calculation
#~~~~~~~~~~~~~~~
fi['numerator'] = numerators.sum(axis=1)
fi['denominator'] = denominators.sum(axis=1)
fi = fi[fi['denominator'] != 0] #Delete Rows where the 'denominator' column is 0
fi['final'] = (fi['numerator'] / fi['denominator'] ) * 100
#~~~~~~~~~~~~~~~
# Step 4)
# Add Special Baltimore City Data
#~~~~~~~~~~~~~~~
url = 'https://api.census.gov/data/20'+str(year)+'/acs/acs5/subject?get=NAME,S1701_C03_002E&for=county%3A510&in=state%3A24&key=<KEY>'
table = pd.read_json(url, orient='records')
fi['final']['Baltimore City'] = float(table.loc[1, table.columns[1]])
return fi['final']
"""
/* <hhchpov_14> */
WITH tbl AS (
select csa,
( (value[1] + value[2] + value[3] + value[4] + value[5] + value[6] + value[7] + value[8] + value[9] + value[10] + value[11] + value[12])
/ nullif(
(value[1] + value[2] + value[3] + value[4] + value[5] + value[6] + value[7] + value[8] + value[9] + value[10] + value[11] + value[12] + value[13] + value[14] + value[15] + value[16] + value[17] + value[18] + value[19] + value[20] + value[21] + value[22] + value[23] + value[24] ),
0)
) * 100::numeric as result
from vital_signs.get_acs_vars_csa_and_bc('2014',ARRAY['B17001_004E','B17001_005E','B17001_006E','B17001_007E','B17001_008E','B17001_009E','B17001_018E','B17001_019E','B17001_020E','B17001_021E','B17001_022E','B17001_023E','B17001_033E','B17001_034E','B17001_035E','B17001_036E','B17001_037E','B17001_038E','B17001_047E','B17001_048E','B17001_049E','B17001_050E','B17001_051E','B17001_052E'])
)
update vital_signs.data
set hhchpov = result from tbl where data2.csa = tbl.csa and update_data_year = '2014' and data_year = '2014';
"""
# Cell
#File: hhm75.py
#Author: <NAME>
#Date: 1/17/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B19001 - HOUSEHOLD INCOME V
# HOUSEHOLD INCOME IN THE PAST 12 MONTHS (IN 2017 INFLATION-ADJUSTED DOLLARS)
# Table Creates: hh25 hh40 hh60 hh75 hhm75, mhhi
#purpose: Produce Household Income Over 75K Indicator
#input: Year
#output:
import pandas as pd
import glob
def hhm75( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B19001*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = sumInts(df)
# Add 'BALTIMORE' which is the SUM of all the CSAs
#~~~~~~~~~~~~~~~
# Step 2)
# Prepare the columns
#~~~~~~~~~~~~~~~
# val1.__class__.__name__
#
# create a new dataframe for giggles
fi = pd.DataFrame()
# append into that dataframe col 001
key = getColName(df, '001')
val = getColByName(df, '001')
fi[key] = val
# append into that dataframe col 002
key = getColName(df, '002')
val = getColByName(df, '002')
fi[key] = val
# append into that dataframe col 003
key = getColName(df, '003')
val = getColByName(df, '003')
fi[key] = val
# append into that dataframe col 004
key = getColName(df, '004')
val = getColByName(df, '004')
fi[key] = val
# append into that dataframe col 005
key = getColName(df, '005')
val = getColByName(df, '005')
fi[key] = val
# append into that dataframe col 006
key = getColName(df, '006')
val = getColByName(df, '006')
fi[key] = val
# append into that dataframe col 007
key = getColName(df, '007')
val = getColByName(df, '007')
fi[key] = val
# append into that dataframe col 008
key = getColName(df, '008')
val = getColByName(df, '008')
fi[key] = val
# append into that dataframe col 009
key = getColName(df, '009')
val = getColByName(df, '009')
fi[key] = val
# append into that dataframe col 010
key = getColName(df, '010')
val = getColByName(df, '010')
fi[key] = val
# append into that dataframe col 011
key = getColName(df, '011')
val = getColByName(df, '011')
fi[key] = val
# append into that dataframe col 012
key = getColName(df, '012')
val = getColByName(df, '012')
fi[key] = val
# Delete Rows where the 'denominator' column is 0 -> like the Jail
fi = fi[fi[fi.columns[0]] != 0]
#~~~~~~~~~~~~~~~
# Step 3)
# Run the Calculation
#~~~~~~~~~~~~~~~
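# Households earning $75K and over as a percent of all households:
# ( B19001_001E - (B19001_002E + ... + B19001_012E) ) / B19001_001E * 100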
return fi.apply(lambda x: ( ( x[fi.columns[0]]-( x[fi.columns[1] ]+ x[fi.columns[2] ]+ x[fi.columns[3] ]+ x[fi.columns[4] ]+ x[fi.columns[5] ]+ x[fi.columns[6] ]+ x[fi.columns[7] ]+ x[fi.columns[8] ]+ x[fi.columns[9] ]+ x[fi.columns[10] ]+ x[fi.columns[11] ] ) ) / x[fi.columns[0]])*100, axis=1)
# Cell
#File: hhpov.py
#Author: <NAME>
#Date: 1/17/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B17017 - Household Poverty, Uses Table B17017 which includes V
# Poverty Status in the Past 12 Months by Household Type by Age of Householder (Universe = households)
#purpose: Produce Household Poverty Indicator
#input: Year
#output:
import pandas as pd
import glob
def hhpov( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B17017*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = sumInts(df)
# Add 'BALTIMORE' which is the SUM of all the CSAs
#~~~~~~~~~~~~~~~
# Step 2)
# Prepare the columns
#~~~~~~~~~~~~~~~
# create a new dataframe for giggles
fi = pd.DataFrame()
# append into that dataframe col 003
key = getColName(df, '003')
val = getColByName(df, '003')
fi[key] = val
# append into that dataframe col 032
key = getColName(df, '032')
val = getColByName(df, '032')
fi[key] = val
# construct the denominator, returns 0 iff the other two rows are equal.
fi['denominator'] = nullIfEqual( df, '003', '032')
# Delete Rows where the 'denominator' column is 0
fi = fi[fi['denominator'] != 0]
#~~~~~~~~~~~~~~~
# Step 3)
# Run the Calculation
#~~~~~~~~~~~~~~~
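# ( B17017_003E / (B17017_003E + B17017_032E) ) * 100; rows whose combined
# denominator is zero were dropped above.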
return fi.apply(lambda x: (x[fi.columns[0]] / x['denominator'])*100, axis=1)
# Cell
#File: hhs.py
#Author: <NAME>
#Date: 4/16/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B11005 - HOUSEHOLDS BY PRESENCE OF PEOPLE UNDER 18 YEARS BY HOUSEHOLD TYPE
# Universe: Households
# Table Creates: hhs, fam, femhhs
#purpose:
#input: Year
#output:
import pandas as pd
import glob
def hhs( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B11005*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = df.sum(numeric_only=True)
df1 = pd.DataFrame()
df1['CSA'] = df.index
df1.set_index('CSA', drop = True, inplace = True)
df1['tot'] = df[ 'B11005_001E_Total' ]
return df1['tot']
# Cell
#File: hsdipl.py
#Author: <NAME>
#Date: 1/17/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B06009 - PLACE OF BIRTH BY EDUCATIONAL ATTAINMENT IN THE UNITED STATES
#purpose: Produce Workforce and Economic Development - Percent Population (25 Years and over) With High School Diploma and Some College or Associates Degree
#Table Uses: B06009 - lesshs, hsdipl, bahigher
#input: Year
#output:
import pandas as pd
import glob
def hsdipl( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B06009*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = sumInts(df)
# Add 'BALTIMORE' which is the SUM of all the CSAs
#~~~~~~~~~~~~~~~
# Step 2)
# Prepare the columns
#~~~~~~~~~~~~~~~
# Final Dataframe
fi = pd.DataFrame()
columns = ['B06009_003E','B06009_004E','B06009_001E']
for col in columns:
fi = addKey(df, fi, col)
# Numerators
numerators = pd.DataFrame()
columns = ['B06009_003E','B06009_004E']
for col in columns:
numerators = addKey(df, numerators, col)
# Denominators
denominators = pd.DataFrame()
columns = ['B06009_001E']
for col in columns:
denominators = addKey(df, denominators, col)
# construct the denominator, returns 0 iff the other two rows are equal.
#~~~~~~~~~~~~~~~
# Step 3)
# Run the Calculation + final mods
# ( ( value[1] + value[2] ) / nullif(value[3],0) )*100
#~~~~~~~~~~~~~~~
fi['numerator'] = numerators.sum(axis=1)
fi['denominator'] = denominators.sum(axis=1)
fi = fi[fi['denominator'] != 0] # Delete Rows where the 'denominator' column is 0
fi['final'] = (fi['numerator'] / fi['denominator'] ) * 100
return fi['final']
"""
/* <hsdipl_14> */ --
WITH tbl AS (
select csa,
( ( value[1] + value[2] ) / nullif(value[3],0) )*100::numeric as result
from vital_signs.get_acs_vars_csa_and_bc('2014',ARRAY['B06009_003E','B06009_004E','B06009_001E'])
)
update vital_signs.data
set hsdipl = result from tbl where data2.csa = tbl.csa and update_data_year = '2014' and data_year = '2014';
"""
# Cell
#File: lesshs.py
#Author: <NAME>
#Date: 1/17/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B06009 - PLACE OF BIRTH BY EDUCATIONAL ATTAINMENT IN THE UNITED STATES
#purpose: Produce Workforce and Economic Development - Percent Population (25 Years and over) With Less Than a High School Diploma or GED Indicator
#Table Uses: B06009 - lesshs, hsdipl, bahigher
#input: Year
#output:
import pandas as pd
import glob
def lesshs( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B06009*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = sumInts(df)
# Add 'BALTIMORE' which is the SUM of all the CSAs
#~~~~~~~~~~~~~~~
# Step 2)
# Prepare the columns
#~~~~~~~~~~~~~~~
# Final Dataframe
fi = pd.DataFrame()
columns = ['B06009_002E','B06009_001E']
for col in columns:
fi = addKey(df, fi, col)
# Numerators
numerators = pd.DataFrame()
columns = ['B06009_002E']
for col in columns:
numerators = addKey(df, numerators, col)
# Denominators
denominators = pd.DataFrame()
columns = ['B06009_001E']
for col in columns:
denominators = addKey(df, denominators, col)
# construct the denominator, returns 0 iff the other two rows are equal.
#~~~~~~~~~~~~~~~
# Step 3)
# Run the Calculation + final mods
# ( value[1] / nullif(value[2],0) )*100
#~~~~~~~~~~~~~~~
fi['numerator'] = numerators.sum(axis=1)
fi['denominator'] = denominators.sum(axis=1)
fi = fi[fi['denominator'] != 0] # Delete Rows where the 'denominator' column is 0
fi['final'] = (fi['numerator'] / fi['denominator'] ) * 100
return fi['final']
"""
/* <lesshs_14> */ --
WITH tbl AS (
select csa,
( value[1] / nullif(value[2],0) )*100::numeric as result
from vital_signs.get_acs_vars_csa_and_bc('2014',ARRAY['B06009_002E','B06009_001E'])
)
update vital_signs.data
set lesshs = result from tbl where data2.csa = tbl.csa and update_data_year = '2014' and data_year = '2014';
"""
# Cell
#File: male.py
#Author: <NAME>
#Date: 4/16/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B01001 - SEX BY AGE
# Universe: Total population
# Table Creates: tpop, female, male, age5 age18 age24 age64 age65
#purpose:
#input: Year
#output:
import pandas as pd
import glob
def male( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B01001*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = df.sum(numeric_only=True)
# df.columns
total = df['B01001_001E_Total']
df1 = pd.DataFrame()
df1['CSA'] = df.index
df1.set_index('CSA', drop = True, inplace = True)
df1['onlyTheFellas'] = df[ 'B01001_002E_Total_Male' ]
return df1['onlyTheFellas']
# Cell
#File: nilf.py
#Author: <NAME>
#Date: 1/17/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B23001 - SEX BY AGE BY EMPLOYMENT STATUS FOR THE POPULATION 16 YEARS AND OVER
# Universe - Population 16 years and over
# Table Creates: empl, unempl, unempr, nilf
#purpose: Produce Workforce and Economic Development - Percent Population 16-64 Not in Labor Force Indicator
#input: Year
#output:
import pandas as pd
import glob
def nilf( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B23001*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = sumInts(df)
# Add 'BALTIMORE' which is the SUM of all the CSAs
#~~~~~~~~~~~~~~~
# Step 2)
# Prepare the columns
#~~~~~~~~~~~~~~~
# Final Dataframe
fi = pd.DataFrame()
columns = ['B23001_003E', 'B23001_010E', 'B23001_017E', 'B23001_024E', 'B23001_031E', 'B23001_038E', 'B23001_045E', 'B23001_052E', 'B23001_059E', 'B23001_066E', 'B23001_089E', 'B23001_096E', 'B23001_103E', 'B23001_110E', 'B23001_117E', 'B23001_124E', 'B23001_131E', 'B23001_138E', 'B23001_145E', 'B23001_152E', 'B23001_009E', 'B23001_016E', 'B23001_023E', 'B23001_030E', 'B23001_037E', 'B23001_044E', 'B23001_051E', 'B23001_058E', 'B23001_065E', 'B23001_072E', 'B23001_095E', 'B23001_102E', 'B23001_109E', 'B23001_116E', 'B23001_123E', 'B23001_130E', 'B23001_137E', 'B23001_144E', 'B23001_151E', 'B23001_158E']
for col in columns:
fi = addKey(df, fi, col)
# Numerators
numerators = pd.DataFrame()
columns = ['B23001_009E', 'B23001_016E', 'B23001_023E', 'B23001_030E', 'B23001_037E', 'B23001_044E', 'B23001_051E', 'B23001_058E', 'B23001_065E', 'B23001_072E', 'B23001_095E', 'B23001_102E', 'B23001_109E', 'B23001_116E', 'B23001_123E', 'B23001_130E', 'B23001_137E', 'B23001_144E', 'B23001_151E', 'B23001_158E']
for col in columns:
numerators = addKey(df, numerators, col)
# Denominators
denominators = pd.DataFrame()
columns = ['B23001_003E', 'B23001_010E', 'B23001_017E', 'B23001_024E', 'B23001_031E', 'B23001_038E', 'B23001_045E', 'B23001_052E', 'B23001_059E', 'B23001_066E', 'B23001_089E', 'B23001_096E', 'B23001_103E', 'B23001_110E', 'B23001_117E', 'B23001_124E', 'B23001_131E', 'B23001_138E', 'B23001_145E', 'B23001_152E']
for col in columns:
denominators = addKey(df, denominators, col)
# construct the denominator, returns 0 iff the other two rows are equal.
#~~~~~~~~~~~~~~~
# Step 3)
# Run the Calculation
# ( ( value[21]+value[22]+value[23]+value[24]+value[25]+value[26]+value[27]+value[28]+value[29]+value[30]+value[31]+value[32]+value[33]+value[34]+value[35]+value[36]+value[37]+value[38]+value[39]+value[40]) --not in labor force 16-64
# /
# nullif( (value[1]+value[2]+value[3]+value[4]+value[5]+value[6]+value[7]+value[8]+value[9]+value[10]+value[11]+value[12]+value[13]+value[14]+value[15]+value[16]+value[17]+value[18]+value[19]+value[20]) -- population 16 to 64 ,0) )*100::numeric
#~~~~~~~~~~~~~~~
fi['numerator'] = numerators.sum(axis=1)
fi['denominator'] = denominators.sum(axis=1)
fi = fi[fi['denominator'] != 0] # Delete Rows where the 'denominator' column is 0
fi['final'] = (fi['numerator'] / fi['denominator'] ) * 100
return fi['final']
"""
/* <nilf_14> */ --
WITH tbl AS (
select csa,
( (value[21]+value[22]+value[23]+value[24]+value[25]+value[26]+value[27]+value[28]+value[29]+value[30]+value[31]+value[32]+value[33]+value[34]+value[35]+value[36]+value[37]+value[38]+value[39]+value[40]) --not in labor force 16-64 / nullif( (value[1]+value[2]+value[3]+value[4]+value[5]+value[6]+value[7]+value[8]+value[9]+value[10]+value[11]+value[12]+value[13]+value[14]+value[15]+value[16]+value[17]+value[18]+value[19]+value[20]) -- population 16 to 64 ,0) )*100::numeric
as result
from vital_signs.get_acs_vars_csa_and_bc('2014', ARRAY['B23001_003E','B23001_010E','B23001_017E','B23001_024E','B23001_031E','B23001_038E','B23001_045E','B23001_052E','B23001_059E','B23001_066E','B23001_089E','B23001_096E','B23001_103E','B23001_110E','B23001_117E','B23001_124E','B23001_131E','B23001_138E','B23001_145E','B23001_152E','B23001_009E','B23001_016E','B23001_023E','B23001_030E','B23001_037E','B23001_044E','B23001_051E','B23001_058E','B23001_065E','B23001_072E','B23001_095E','B23001_102E','B23001_109E','B23001_116E','B23001_123E','B23001_130E','B23001_137E','B23001_144E','B23001_151E','B23001_158E'])
)
update vital_signs.data
set nilf = result from tbl where data2.csa = tbl.csa and update_data_year = '2014' and data_year = '2014';
"""
# Cell
#File: othrcom.py
#Author: <NAME>
#Date: 1/24/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B08101 - MEANS OF TRANSPORTATION TO WORK BY AGE
# Universe: Workers 16 years and over
# Table Creates: othrcom, drvalone, carpool, pubtran, walked
#purpose: Produce Sustainability - Percent of Population Using Other Means to Commute to Work (Taxi, Motorcycle, Bicycle, Other) Indicator
#input: Year
#output:
import pandas as pd
import glob
def othrcom( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B08101*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = sumInts(df)
# Add 'BALTIMORE' which is the SUM of all the CSAs
#~~~~~~~~~~~~~~~
# Step 2)
# Prepare the columns
#~~~~~~~~~~~~~~~
# Final Dataframe
fi = pd.DataFrame()
columns = ['B08101_001E','B08101_049E','B08101_041E']
for col in columns:
fi = addKey(df, fi, col)
# Numerators
numerators = pd.DataFrame()
columns = ['B08101_041E']
for col in columns:
numerators = addKey(df, numerators, col)
# Denominators
denominators = pd.DataFrame()
columns = ['B08101_001E','B08101_049E']
for col in columns:
denominators = addKey(df, denominators, col)
# construct the denominator, returns 0 iff the other two rows are equal.
#~~~~~~~~~~~~~~~
# Step 3)
# Run the Calculation
# ( value[3] / nullif((value[1]-value[2]),0) )*100
#~~~~~~~~~~~~~~~
fi['numerator'] = numerators.sum(axis=1)
fi['denominator'] = denominators.iloc[: ,0] - denominators.iloc[: ,1]
fi = fi[fi['denominator'] != 0] # Delete Rows where the 'denominator' column is 0
fi['final'] = (fi['numerator'] / fi['denominator'] ) * 100
#~~~~~~~~~~~~~~~
# Step 4)
# Add Special Baltimore City Data
# 100- "6.7", "59.8", "9.2", "18.4", "3.7", = 2.2
# 100- (walked + drvalone + carpool + pubtran + workfromhome(13e))
#~~~~~~~~~~~~~~~
url = 'https://api.census.gov/data/20'+str(year)+'/acs/acs5/subject?get=NAME,S0801_C01_010E,S0801_C01_003E,S0801_C01_004E,S0801_C01_009E,S0801_C01_013E&for=county%3A510&in=state%3A24&key=<KEY>'
table = pd.read_json(url, orient='records')
walked = float(table.loc[1, table.columns[1]] )
drvalone = float(table.loc[1, table.columns[2]] )
carpool = float(table.loc[1, table.columns[3]] )
pubtran = float(table.loc[1, table.columns[4]] )
workfromhome = float(table.loc[1, table.columns[5]] )
fi['final']['Baltimore City'] = 100 - ( walked + drvalone + carpool + pubtran + workfromhome )
return fi['final']
"""
/* <othrcom_14> */ --
WITH tbl AS (
select csa,
( value[3] / nullif((value[1]-value[2]),0) )*100::numeric as result
from vital_signs.get_acs_vars_csa_and_bc('2014',ARRAY['B08101_001E','B08101_049E','B08101_041E'])
)
update vital_signs.data
set othrcom = result from tbl where data2.csa = tbl.csa and update_data_year = '2014' and data_year = '2014';
"""
# Cell
#File: p2more.py
#Author: <NAME>
#Date: 4/16/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B03002 - HISPANIC OR LATINO ORIGIN BY RACE
# Universe: Total Population
# Table Creates: racdiv, paa, pwhite, pasi, phisp, p2more, ppac
#purpose:
#input: Year
#output:
import pandas as pd
import glob
def p2more( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B03002*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = df.sum(numeric_only=True)
# Append the one column from the other ACS Table
df['B03002_012E_Total_Hispanic_or_Latino']
df1 = pd.DataFrame()
df1['CSA'] = df.index
df1.set_index('CSA', drop = True, inplace = True)
tot = df[ 'B03002_001E_Total' ]
df1['TwoOrMore%NH'] = df['B03002_009E_Total_Not_Hispanic_or_Latino_Two_or_more_races'] / tot * 100
return df1['TwoOrMore%NH']
# Cell
#File: pubtran.py
#Author: <NAME>
#Date: 1/17/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B08101 - MEANS OF TRANSPORTATION TO WORK BY AGE
# Universe: Workers 16 Years and Over
# Table Creates: othrcom, drvalone, carpool, pubtran, walked
#purpose: Produce Sustainability - Percent of Population that Uses Public Transportation to Get to Work Indicator
#input: Year
#output:
import pandas as pd
import glob
def pubtran( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B08101*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = sumInts(df)
# Add 'BALTIMORE' which is the SUM of all the CSAs
#~~~~~~~~~~~~~~~
# Step 2)
# Prepare the columns
#~~~~~~~~~~~~~~~
# Final Dataframe
fi = pd.DataFrame()
columns = ['B08101_001E','B08101_049E','B08101_025E']
for col in columns:
fi = addKey(df, fi, col)
# Numerators
numerators = pd.DataFrame()
columns = ['B08101_025E']
for col in columns:
numerators = addKey(df, numerators, col)
# Denominators
denominators = pd.DataFrame()
import os
import string
import sys
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import multiprocessing as mp
import numpy as np
import pandas as pd
from palettable.colorbrewer.qualitative import Paired_12
import seaborn as sns
import scipy.stats as stats
mpl.rcParams['text.usetex'] = True
mpl.rcParams['text.latex.preamble'] = [r'\usepackage{amsmath}']
INT_MIN = 0.
INT_MAX = 250.
D_INT = 5.
ACC = 0.99
INTS = np.arange(INT_MIN, INT_MAX, D_INT)
FIGS_DIR = '/home/johnmcbride/Dropbox/phd/LaTEX/Scales/Figures/'
def gaussian(x, mean, var):
return np.exp( - (x - mean)**2 / (2. * var)) / (var * 2 * np.pi)**0.5
def integrate_gauss(mean, var, x1, x2, num=1000):
X = np.linspace(x1, x2, num=num)
P = gaussian(X, mean, var)
# return X, P
return np.trapz(P, X)
def get_percentage_correct_from_range_of_ints(dI, prod_var, percep_var, ints=INTS):
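# For each true interval I0 in `ints`: lay a grid of candidate produced intervals
# (width dI), weight each grid point by a production Gaussian with variance prod_var,
# and score the probability that a perceiver with variance percep_var assigns it to
# the correct category out of {0, I0, 2*I0}. Returns the weighted fraction correct.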
correct = []
for I0 in ints:
int_cats = np.arange(0, I0*2, dI)
prob_produced = []
prob_correct = []
for I1 in int_cats:
prod_prob = integrate_gauss(I0, prod_var, I1-dI/2., I1+dI/2.)
percep_prob = [integrate_gauss(i, percep_var, I1-dI/2., I1+dI/2.) for i in [0, I0, I0*2]]
prob_produced.append(prod_prob)
prob_correct.append(percep_prob[1] / sum(percep_prob))
correct.append(np.sum(np.array(prob_produced) * np.array(prob_correct)) / np.sum(prob_produced))
return np.array(correct)
def get_percentage_correct_from_range_of_ints_2(dI, prod_var, percep_var, ints=INTS):
correct = []
for I0 in ints:
int_cats = np.arange(0, I0*2, dI)
prob_produced = []
prob_correct = []
for I1 in int_cats:
prod_prob = integrate_gauss(I0, prod_var, I1-dI/2., I1+dI/2.)
percep_prob = [integrate_gauss(i, percep_var, I1-dI/2., I1+dI/2.) for i in [0, I0]]
prob_produced.append(prod_prob)
prob_correct.append(percep_prob[1] / sum(percep_prob))
correct.append(np.sum(np.array(prob_produced) * np.array(prob_correct)) / np.sum(prob_produced))
return np.array(correct)
def get_interval_by_accuracy(ints, correct, acc=ACC):
try:
i = np.where(correct > acc)[0][0]
except IndexError:
i = np.argmin(np.abs(correct - acc))
if i:
return ints[i-1] + (ints[i] - ints[i-1]) * (acc - correct[i-1]) / (correct[i] - correct[i-1])
else:
return ints[0]
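# Hypothetical usage sketch: fraction of intervals categorised correctly when both
# production and perception noise have a standard deviation of 20, and the smallest
# interval reaching the ACC threshold defined above.
if __name__ == '__main__':
    demo_correct = get_percentage_correct_from_range_of_ints(D_INT, 20.**2, 20.**2)
    print(get_interval_by_accuracy(INTS, demo_correct))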
def plot_distinguishability_by_grid_size():
dI = 5
dI_arr = [3, 5, 10, 20, 25, 30]
prod_sdev_arr = np.arange(5., 32.5, 2.5)
percep_sdev_arr = np.arange(5., 32.5, 2.5)
fig1, ax1 = plt.subplots(2,3)
ax1 = ax1.reshape(ax1.size)
df_list = []
for i, dI in enumerate(dI_arr):
xi, yi = np.meshgrid(prod_sdev_arr, percep_sdev_arr)
prod_in = xi.ravel()
percep_in = yi.ravel()
pool = mp.Pool(24)
correct = pool.starmap(get_percentage_correct_from_range_of_ints, [(dI, prod_in[i]**2, percep_in[i]**2) for i in range(len(prod_in))])
thresh_list = [get_interval_by_accuracy(INTS, c) for c in correct]
df_list.append(pd.DataFrame(data={'production':prod_in, 'perception':percep_in, 'threshold':thresh_list, 'dI':[dI]*prod_in.size}))
sns.heatmap(df_list[i].pivot('production', 'perception', 'threshold'), ax=ax1[i], vmin=50, vmax=180, annot=True)
ax1[i].invert_yaxis()
ax1[i].set_title(f"dI = {dI}")
# plt.legend(loc='best')
# plt.plot(np.arange(50, 550, 50), thresh_int)
plt.show()
def plot_distinguishability_ranges():
dI = 5
min_prod = [5., 10., 30.]
min_per = [10., 20., 40.]
rang = 27.5
titles = ['expert', 'good_untrained', 'bad_untrained']
fig, ax = plt.subplots(3)
for i in range(3):
prod_sdev_arr = np.arange(min_prod[i], min_prod[i]+rang, 2.5)
percep_sdev_arr = np.arange(min_per[i], min_per[i]+rang, 2.5)
xi, yi = np.meshgrid(prod_sdev_arr, percep_sdev_arr)
prod_in = xi.ravel()
percep_in = yi.ravel()
pool = mp.Pool(28)
correct = pool.starmap(get_percentage_correct_from_range_of_ints, [(dI, prod_in[j]**2, percep_in[j]**2) for j in range(len(prod_in))])
thresh_list = [get_interval_by_accuracy(INTS, c) for c in correct]
annot = np.zeros(xi.shape, dtype='<U3')
np.fill_diagonal(annot, [str(int(x)) for x in np.array(thresh_list).reshape(xi.shape).T.diagonal()])
df = pd.DataFrame(data={'production':prod_in, 'perception':percep_in, 'threshold':thresh_list, 'dI':[dI]*prod_in.size})
sns.heatmap(df.pivot('production', 'perception', 'threshold'), ax=ax[i], vmin=50, vmax=180, annot=annot, fmt="s")
ax[i].invert_yaxis()
ax[i].set_title(titles[i])
plt.show()
def plot_distinguishability_ranges_one_plot():
dI = 5
min_prod = [10., 20., 40.]
min_per = [10., 20., 40.]
rang = 27.5
titles = ['expert', 'good_untrained', 'bad_untrained']
fig, ax = plt.subplots()
prod_sdev_arr = np.arange(5, 57.5, 5)
percep_sdev_arr = np.arange(5, 57.5, 5)
xi, yi = np.meshgrid(prod_sdev_arr, percep_sdev_arr)
prod_in = xi.ravel()
percep_in = yi.ravel()
pool = mp.Pool(28)
correct = pool.starmap(get_percentage_correct_from_range_of_ints, [(dI, prod_in[j]**2, percep_in[j]**2) for j in range(len(prod_in))])
thresh_list = [get_interval_by_accuracy(INTS, c) for c in correct]
annot = np.zeros(xi.shape, dtype='<U3')
np.fill_diagonal(annot, [str(int(x)) for x in np.array(thresh_list).reshape(xi.shape).T.diagonal()])
np.save('Results/annotations', annot)
df = pd.DataFrame(data={'production':prod_in, 'perception':percep_in, 'threshold':thresh_list, 'dI':[dI]*prod_in.size})
df.to_feather(f'Results/three_notes_acc{ACC}.feather')
xticks = np.arange(5, 55, 5)
yticks = np.arange(5, 55, 5)
sns.heatmap(df.pivot('production', 'perception', 'threshold'), ax=ax, vmin=30, vmax=300, annot=annot, fmt="s", xticklabels=xticks, yticklabels=yticks)
ax.invert_yaxis()
ax_scale = 5.0
ax.set_xticks((np.arange(5, 55, 5)-2.5)/ax_scale)
ax.set_yticks((np.arange(5, 55, 5)-2.5)/ax_scale)
plt.savefig('Figs/accurate_intervals.png', dpi=1200)
plt.savefig('Figs/accurate_intervals.pdf', dpi=1200)
# plt.show()
def plot_distinguishability_two_notes():
dI = 5
min_prod = [10., 20., 40.]
min_per = [10., 20., 40.]
rang = 27.5
titles = ['expert', 'good_untrained', 'bad_untrained']
fig, ax = plt.subplots()
prod_sdev_arr = np.arange(2.5, 57.5, 2.5)
percep_sdev_arr = np.arange(2.5, 57.5, 2.5)
xi, yi = np.meshgrid(prod_sdev_arr, percep_sdev_arr)
prod_in = xi.ravel()
percep_in = yi.ravel()
pool = mp.Pool(28)
correct = pool.starmap(get_percentage_correct_from_range_of_ints_2, [(dI, prod_in[j]**2, percep_in[j]**2) for j in range(len(prod_in))])
thresh_list = [get_interval_by_accuracy(INTS, c) for c in correct]
annot = np.zeros(xi.shape, dtype='<U3')
np.fill_diagonal(annot, [str(int(x)) for x in np.array(thresh_list).reshape(xi.shape).T.diagonal()])
df = pd.DataFrame(data={'production':prod_in, 'perception':percep_in, 'threshold':thresh_list, 'dI':[dI]*prod_in.size})
xticks = np.arange(5, 55, 5)
yticks = np.arange(5, 55, 5)
sns.heatmap(df.pivot('production', 'perception', 'threshold'), ax=ax, vmin=30, vmax=300, annot=annot, fmt="s", xticklabels=xticks, yticklabels=yticks)
ax.invert_yaxis()
ax_scale = 2.5
ax.set_xticks((np.arange(5, 55, 5)-2.5)/ax_scale)
ax.set_yticks((np.arange(5, 55, 5)-2.5)/ax_scale)
plt.savefig('Figs/two_notes_accurate_intervals.png', dpi=1200)
plt.savefig('Figs/two_notes_accurate_intervals.pdf', dpi=1200)
# plt.show()
def plot_frac_correct():
fig, ax = plt.subplots()
dI = 2
for std in [5, 10, 20, 40]:
correct = get_percentage_correct_from_range_of_ints(dI, std**2, std**2)
ax.plot(INTS, correct, label=r"$\sigma = {0}$".format(std))
ax.legend(loc='best', frameon=False)
plt.show()
def plot_heatmap():
fig, ax = plt.subplots()
df = pd.read_feather(f'Results/three_notes_acc{ACC}.feather')
annot = np.load('Results/annotations.npy')
xticks = np.arange(5, 55, 5)
yticks = np.arange(5, 55, 5)
sns.heatmap(df.pivot('production', 'perception', 'threshold'), ax=ax, vmin=30, vmax=300, annot=annot, fmt="s", xticklabels=xticks, yticklabels=yticks)
ax.invert_yaxis()
ax_scale = 5.0
ax.set_xticks((np.arange(5, 55, 5)-2.5)/ax_scale)
ax.set_yticks((np.arange(5, 55, 5)-2.5)/ax_scale)
plt.savefig('Figs/accurate_intervals.png', dpi=1200)
plt.savefig('Figs/accurate_intervals.pdf', dpi=1200)
def plot_SI():
fig = plt.figure(figsize=(10,5))
gs = gridspec.GridSpec(2,3, width_ratios=[1.0, 1.0, 1.8], height_ratios=[1.0, 1.0])
gs.update(wspace=0.30 ,hspace=0.40)
ax = [fig.add_subplot(gs[0,0]),fig.add_subplot(gs[1,0]),fig.add_subplot(gs[:,1]),fig.add_subplot(gs[:,2])]
std = 20
X = np.linspace(0, 200, num=1000)
ax[0].plot(X, stats.norm.pdf(X, 100, std), label=f"Category A", c='k')
col = ['k'] + list(np.array(Paired_12.mpl_colors)[[1,3,5]])
cat = [f"Category {s}" for s in 'ABC']
Y = []
for i, mu in enumerate([100, 50, 150]):
Y.append(stats.norm.pdf(X, mu, std))
# ax[1].plot(X, stats.norm.pdf(X, mu, std), label=cat[i], c=col[i])
Y = np.array(Y)
ysum = np.sum(Y, axis=0)
Y = Y/ysum
for i, mu in enumerate([100, 50, 150]):
ax[1].plot(X, Y[i], '-', label=cat[i], c=col[i])
ax[0].set_xlabel("Produced interval")
ax[1].set_xlabel("Produced interval")
ax[0].set_ylabel("Probability")
ax[1].set_ylabel(r"$P_{Cat}$")
ax[0].set_ylim(0, 0.035)
ax[1].set_ylim(0, 1.70)
ax[0].set_yticks([])
ax[1].set_yticks([0,1])
for a in ax[:2]:
a.legend(loc='upper right', frameon=False)
dI = 2
for i, std in enumerate([5, 10, 20, 40]):
correct = get_percentage_correct_from_range_of_ints(dI, std**2, std**2)
line, = ax[2].plot(INTS, correct, label=r"$\sigma = {0}$".format(std), c='k')
line.set_dashes([12-i*2-3, 3+i*0])
ax[2].legend(loc='best', frameon=False)
ax[2].plot([0,250],[.99]*2, '-', c=col[3], alpha=0.7)
ax[2].set_xlim(0, 250)
ax[2].set_xlabel(r'$I_{\textrm{min}}$')
ax[2].set_ylabel("Fraction correctly perceived")
df = pd.read_feather(f'Results/three_notes_acc{ACC}.feather')
import os
import sys
import pandas as pd
import time
import multiprocessing
class my_dictionary(dict):
def __init__(self):
super().__init__()
def add(self, key, value):
self[key] = value
def chromosomes():
chr = ['chr1', 'chr2', 'chr3', 'chr4', 'chr5', 'chr6', 'chr7', 'chr8', 'chr9', 'chr10', 'chr11', 'chr12', 'chr13',
'chr14', 'chr15', 'chr16', 'chr17', 'chr18', 'chr19', 'chr20', 'chr21', 'chr22', 'chrX', 'chrY']
return(chr)
def create_dir(path):
if not os.path.isdir(path):
os.system(f'mkdir {path}')
def time_date_id():
import time
cur_time = time.asctime()
cur_time = cur_time.replace(":", "-")
cur_time = cur_time.replace(" ", "_")
return(cur_time)
def fix_clusters_represent(clust, representatives, tpm_threshold, ttl_reads):
df1 = pd.DataFrame.from_records(clust)
def moving_average(series, window=100, sigma=50):
'''
Calculate the moving average of a series using scipy. Used by continuumFlattenSpec.
Borrowed from http://www.nehalemlabs.net/prototype/blog/2014/04/12/how-to-fix-scipys-interpolating-spline-default-behavior/
'''
import numpy as np
from scipy.signal import gaussian
from scipy.ndimage import filters
b = gaussian(window, sigma)
average = filters.convolve1d(series, b/b.sum())
var = filters.convolve1d(np.power(series-average,2), b/b.sum())
return average, var
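# Minimal sketch on synthetic data (not from the original pipeline) of the helper above.
if __name__ == '__main__':
    import numpy as np
    demo_flux = np.sin(np.linspace(0, 20, 2000)) + np.random.normal(0, 0.05, 2000)
    demo_avg, demo_var = moving_average(demo_flux, window=100, sigma=50)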
def getOrderWavelengths(hdu):
'''
Retrieve wavelength arrays for a multiorder echelle spectra.
ONLY LINEAR, LOG, OR CHEBYSHEV POLYNOMIAL DISPERSIONS ARE SUPPORTED FOR NOW
input: FITS hdu object
output: list of wavelength arrays, one per order
some code is from https://github.com/kgullikson88/General/blob/master/readmultispec.py
'''
import numpy as np
from astropy.io import fits
header = hdu[0].header
#print(list(header.keys()))
# before things get interesting, try a normal linear dispersion
try:
dwave = header['cdelt1']
wavestart = header['crval1']
wavestop = wavestart + dwave*header['naxis1']  # naxis1 = number of pixels along the dispersion axis
wave = np.arange(wavestart, wavestop, dwave)
except KeyError:
pass
# OK then, each order has a set of N=lenwat useful parameters hidden in the WAT2 header entries
WAT2string = ''
for key in header.keys():
if 'WAT2_' in key: # build a giant string
#print(key, header[key])
if len(header[key]) < 68: # catch the entries that need a trailing space added (length 67)
WAT2string += header[key] + ' '
elif len(header[key]) > 68:
raise ValueError('Length of header entry {0} is {1}; do you even FITS?!'.format(key, len(header[key])))
else:
WAT2string += header[key]
#print(WAT2string)
WAT2list = [] # use the giant string to create a list we can slice into orders
lenwatlist = []
for item in WAT2string.split("\""):
if 'spec' not in item:
lenwatlist.append(len(item.split(' ')))
WAT2list.extend(item.split(' '))
norders = len(hdu[0].data)
lenwat = lenwatlist[1]
WAT2orders = [] # placeholder list for info in each order
for idx in np.arange(0, lenwat*norders, lenwat):
WAT2orders.append(WAT2list[idx:idx+lenwat])
#print(WAT2list[idx:idx+lenwat])
fitswaves = []
for idx, order in enumerate(WAT2orders):
#print(order)
dtype = order[2]
w1 = np.float64(order[3]) # dispersion coordinate of 1st physical pixel
dw = np.float64(order[4]) # average dispersion interval per physical pixel
nwave = int(order[5]) # number of wavelength points in the order
z = np.float64(order[6]) # Doppler factor
apmin, apmax = float(order[7]), float(order[8]) # original pixel limits along the spatial axis, not used
if dtype == '0': # linear
wavelengths = (w1 + dw * np.arange(nwave, dtype=np.float64)) / (1. + z)
elif dtype == '1': # log
wavelengths = (w1 + dw * np.arange(nwave, dtype=np.float64)) / (1. + z)
wavelengths = np.power(10., wavelengths)
elif dtype == '2': # nonlinear
if np.float64(order[6]) != 0:
print(np.float64(order[6]))
raise Warning('Nonzero Doppler factor in order {0}, not accounting for this.'.format(idx))
wt_i, w0_i, ftype_i = np.float64(order[9]), np.float64(order[10]), int(order[11])
cheb_order = int(order[12])
# ftype_i means 1 for a Chebyshev polynomial; 2-6 for other polynomials
# ONLY CONSIDERING CHEBYSHEV FOR NOW!
if ftype_i != 1:
raise ValueError('Sorry, the nonlinear dispersion is not a Chebyshev polynomial.')
pmin = np.float64(order[13])
pmax = np.float64(order[14])
pmiddle = (pmax + pmin) / 2
prange = pmax - pmin
coeffs = []
for cidx in range(0, cheb_order):
coeffs.append(np.float64(order[-5+cidx]))
xs = (np.arange(nwave, dtype=np.float64) + 1 - pmiddle) / ((prange) / 2)
p0 = np.ones(nwave, dtype=np.float64)
p1 = xs
wavelengths = p0 * coeffs[0] + p1 * coeffs[1]
for i in range(2, cheb_order):
p2 = 2 * xs * p1 - p0
wavelengths = wavelengths + p2 * coeffs[i]
p0 = p1
p1 = p2
#print(wavelengths) # it works!
else:
raise ValueError('Spectrum type not recognized.')
fitswaves.append(list(wavelengths))
fitswaves = np.array(fitswaves, dtype=np.float64)
return fitswaves # numpy array
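# Hypothetical usage sketch (file name is illustrative only):
#   from astropy.io import fits
#   hdu = fits.open('echelle_spectrum.fits')
#   order_waves = getOrderWavelengths(hdu)  # one wavelength array per order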
def continuumFlattenSpec(waves, fluxes, window=50, fitplot=True):
'''
Fits a spline to a spectrum and divides to continuum flatten it.
Returns waves and fluxes/continuum with length = original_length - 2*window.
'''
import matplotlib.pyplot as plt
import numpy as np
from scipy.interpolate import UnivariateSpline
import pandas as pd
from PyAstronomy import pyasl
# Decide how long the final arrays will be
newlength = len(waves) - 2*window
# Identify outlier points
iin, iout = pyasl.slidingPolyResOutlier(waves, fluxes, points=window*2, deg=1, stdlim=3, mode='above', controlPlot=False)
#print("Number of outliers: ", len(iout))
#print("Indices of outliers: ", iout)
if len(iout) > 3*window:
raise ValueError('More than {0} outliers found, adjust stdlim'.format(3*window))
# Remove outliers
waves, fluxes = np.array(waves[iin]), np.array(fluxes[iin])
# Fit the continuum
specsmooth_top = pd.Series(fluxes)
import numpy as np
import pandas as pd
import os, errno
import datetime
import uuid
import itertools
import yaml
import subprocess
import scipy.sparse as sp
from scipy.spatial.distance import squareform
from sklearn.decomposition.nmf import non_negative_factorization
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
from sklearn.utils import sparsefuncs
from fastcluster import linkage
from scipy.cluster.hierarchy import leaves_list
import matplotlib.pyplot as plt
import scanpy as sc
def save_df_to_npz(obj, filename):
np.savez_compressed(filename, data=obj.values, index=obj.index.values, columns=obj.columns.values)
def save_df_to_text(obj, filename):
obj.to_csv(filename, sep='\t')
def load_df_from_npz(filename):
with np.load(filename, allow_pickle=True) as f:
obj = pd.DataFrame(**f)
return obj
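# Round-trip sketch for the npz helpers above (file name is illustrative):
#   save_df_to_npz(scores_df, 'scores.df.npz')
#   scores_df = load_df_from_npz('scores.df.npz')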
def check_dir_exists(path):
"""
Checks if directory already exists or not and creates it if it doesn't
"""
try:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
def worker_filter(iterable, worker_index, total_workers):
return (p for i,p in enumerate(iterable) if (i-worker_index)%total_workers==0)
def fast_euclidean(mat):
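# Pairwise Euclidean distances between rows of `mat` via the Gram matrix identity
# ||a - b||^2 = ||a||^2 + ||b||^2 - 2*a.b, returned in condensed (squareform) form.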
D = mat.dot(mat.T)
squared_norms = np.diag(D).copy()
D *= -2.0
D += squared_norms.reshape((-1,1))
D += squared_norms.reshape((1,-1))
D = np.sqrt(D)
D[D < 0] = 0
return squareform(D, checks=False)
def fast_ols_all_cols(X, Y):
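# Ordinary least squares for every column of Y at once: beta = pinv(X) @ Y.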
pinv = np.linalg.pinv(X)
beta = np.dot(pinv, Y)
return(beta)
def fast_ols_all_cols_df(X,Y):
beta = fast_ols_all_cols(X, Y)
beta = pd.DataFrame(beta, index=X.columns, columns=Y.columns)
return(beta)
def var_sparse_matrix(X):
mean = np.array(X.mean(axis=0)).reshape(-1)
Xcopy = X.copy()
Xcopy.data **= 2
var = np.array(Xcopy.mean(axis=0)).reshape(-1) - (mean**2)
return(var)
def get_highvar_genes_sparse(expression, expected_fano_threshold=None,
minimal_mean=0.01, numgenes=None):
# Find high variance genes within those cells
gene_mean = np.array(expression.mean(axis=0)).astype(float).reshape(-1)
E2 = expression.copy(); E2.data **= 2; gene2_mean = np.array(E2.mean(axis=0)).reshape(-1)
gene_var = pd.Series(gene2_mean - (gene_mean**2))
del(E2)
gene_mean = pd.Series(gene_mean)
gene_fano = gene_var / gene_mean
# Find parameters for expected fano line
top_genes = gene_mean.sort_values(ascending=False)[:20].index
A = (np.sqrt(gene_var)/gene_mean)[top_genes].min()
w_mean_low, w_mean_high = gene_mean.quantile([0.10, 0.90])
w_fano_low, w_fano_high = gene_fano.quantile([0.10, 0.90])
winsor_box = ((gene_fano > w_fano_low) &
(gene_fano < w_fano_high) &
(gene_mean > w_mean_low) &
(gene_mean < w_mean_high))
fano_median = gene_fano[winsor_box].median()
B = np.sqrt(fano_median)
gene_expected_fano = (A**2)*gene_mean + (B**2)
fano_ratio = (gene_fano/gene_expected_fano)
# Identify high var genes
if numgenes is not None:
highvargenes = fano_ratio.sort_values(ascending=False).index[:numgenes]
high_var_genes_ind = fano_ratio.index.isin(highvargenes)
T=None
else:
if not expected_fano_threshold:
            T = (1. + gene_fano[winsor_box].std())
else:
T = expected_fano_threshold
        high_var_genes_ind = (fano_ratio > T) & (gene_mean > minimal_mean)
gene_counts_stats = pd.DataFrame({
'mean': gene_mean,
'var': gene_var,
'fano': gene_fano,
'expected_fano': gene_expected_fano,
'high_var': high_var_genes_ind,
'fano_ratio': fano_ratio
})
gene_fano_parameters = {
'A': A, 'B': B, 'T':T, 'minimal_mean': minimal_mean,
}
return(gene_counts_stats, gene_fano_parameters)
def get_highvar_genes(input_counts, expected_fano_threshold=None,
minimal_mean=0.01, numgenes=None):
# Find high variance genes within those cells
gene_counts_mean = pd.Series(input_counts.mean(axis=0).astype(float))
gene_counts_var = pd.Series(input_counts.var(ddof=0, axis=0).astype(float))
gene_counts_fano = pd.Series(gene_counts_var/gene_counts_mean)
# Find parameters for expected fano line
top_genes = gene_counts_mean.sort_values(ascending=False)[:20].index
A = (np.sqrt(gene_counts_var)/gene_counts_mean)[top_genes].min()
w_mean_low, w_mean_high = gene_counts_mean.quantile([0.10, 0.90])
w_fano_low, w_fano_high = gene_counts_fano.quantile([0.10, 0.90])
winsor_box = ((gene_counts_fano > w_fano_low) &
(gene_counts_fano < w_fano_high) &
(gene_counts_mean > w_mean_low) &
(gene_counts_mean < w_mean_high))
fano_median = gene_counts_fano[winsor_box].median()
B = np.sqrt(fano_median)
gene_expected_fano = (A**2)*gene_counts_mean + (B**2)
fano_ratio = (gene_counts_fano/gene_expected_fano)
# Identify high var genes
if numgenes is not None:
highvargenes = fano_ratio.sort_values(ascending=False).index[:numgenes]
high_var_genes_ind = fano_ratio.index.isin(highvargenes)
T=None
else:
if not expected_fano_threshold:
T = (1. + gene_counts_fano[winsor_box].std())
else:
T = expected_fano_threshold
high_var_genes_ind = (fano_ratio > T) & (gene_counts_mean > minimal_mean)
gene_counts_stats = pd.DataFrame({
'mean': gene_counts_mean,
'var': gene_counts_var,
'fano': gene_counts_fano,
'expected_fano': gene_expected_fano,
'high_var': high_var_genes_ind,
'fano_ratio': fano_ratio
})
gene_fano_parameters = {
'A': A, 'B': B, 'T':T, 'minimal_mean': minimal_mean,
}
return(gene_counts_stats, gene_fano_parameters)
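# Illustrative sketch (not part of the original module): typical use of the
# Fano-factor based selection above on a hypothetical cells-x-genes counts
# table. All names below are examples only.
def _example_get_highvar_genes():
    rng = np.random.RandomState(0)
    toy_counts = pd.DataFrame(rng.poisson(lam=2.0, size=(50, 20)).astype(float))
    gene_stats, fano_params = get_highvar_genes(toy_counts, numgenes=5)
    # 'high_var' flags the 5 genes most overdispersed relative to the expected Fano line
    return gene_stats.loc[gene_stats['high_var']].index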
def compute_tpm(input_counts):
"""
Default TPM normalization
"""
tpm = input_counts.copy()
sc.pp.normalize_per_cell(tpm, counts_per_cell_after=1e6)
return(tpm)
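# Illustrative sketch (not part of the original module): after compute_tpm,
# every cell (row) of the returned AnnData should sum to roughly 1e6.
def _example_compute_tpm():
    adata = sc.AnnData(np.random.poisson(2.0, size=(5, 10)).astype(float))
    tpm = compute_tpm(adata)
    return np.asarray(tpm.X.sum(axis=1)).ravel()  # ~1e6 per cell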
class cNMF():
def __init__(self, output_dir=".", name=None):
"""
Parameters
----------
output_dir : path, optional (default=".")
Output directory for analysis files.
name : string, optional (default=None)
A name for this analysis. Will be prefixed to all output files.
If set to None, will be automatically generated from date (and random string).
"""
self.output_dir = output_dir
if name is None:
now = datetime.datetime.now()
rand_hash = uuid.uuid4().hex[:6]
name = '%s_%s' % (now.strftime("%Y_%m_%d"), rand_hash)
self.name = name
self.paths = None
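    # Illustrative usage sketch (not part of the original class); the output
    # directory and run name below are hypothetical:
    #
    #   cnmf_obj = cNMF(output_dir='./cnmf_results', name='example_run')
    #
    # All later steps (get_norm_counts, run_nmf, consensus, ...) write their
    # files under ./cnmf_results/example_run/.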
def _initialize_dirs(self):
if self.paths is None:
# Check that output directory exists, create it if needed.
check_dir_exists(self.output_dir)
check_dir_exists(os.path.join(self.output_dir, self.name))
check_dir_exists(os.path.join(self.output_dir, self.name, 'cnmf_tmp'))
self.paths = {
'normalized_counts' : os.path.join(self.output_dir, self.name, 'cnmf_tmp', self.name+'.norm_counts.h5ad'),
'nmf_replicate_parameters' : os.path.join(self.output_dir, self.name, 'cnmf_tmp', self.name+'.nmf_params.df.npz'),
'nmf_run_parameters' : os.path.join(self.output_dir, self.name, 'cnmf_tmp', self.name+'.nmf_idvrun_params.yaml'),
'nmf_genes_list' : os.path.join(self.output_dir, self.name, self.name+'.overdispersed_genes.txt'),
'tpm' : os.path.join(self.output_dir, self.name, 'cnmf_tmp', self.name+'.tpm.h5ad'),
'tpm_stats' : os.path.join(self.output_dir, self.name, 'cnmf_tmp', self.name+'.tpm_stats.df.npz'),
'iter_spectra' : os.path.join(self.output_dir, self.name, 'cnmf_tmp', self.name+'.spectra.k_%d.iter_%d.df.npz'),
'iter_usages' : os.path.join(self.output_dir, self.name, 'cnmf_tmp', self.name+'.usages.k_%d.iter_%d.df.npz'),
'merged_spectra': os.path.join(self.output_dir, self.name, 'cnmf_tmp', self.name+'.spectra.k_%d.merged.df.npz'),
'local_density_cache': os.path.join(self.output_dir, self.name, 'cnmf_tmp', self.name+'.local_density_cache.k_%d.merged.df.npz'),
'consensus_spectra': os.path.join(self.output_dir, self.name, 'cnmf_tmp', self.name+'.spectra.k_%d.dt_%s.consensus.df.npz'),
'consensus_spectra__txt': os.path.join(self.output_dir, self.name, self.name+'.spectra.k_%d.dt_%s.consensus.txt'),
'consensus_usages': os.path.join(self.output_dir, self.name, 'cnmf_tmp',self.name+'.usages.k_%d.dt_%s.consensus.df.npz'),
'consensus_usages__txt': os.path.join(self.output_dir, self.name, self.name+'.usages.k_%d.dt_%s.consensus.txt'),
'consensus_stats': os.path.join(self.output_dir, self.name, 'cnmf_tmp', self.name+'.stats.k_%d.dt_%s.df.npz'),
'clustering_plot': os.path.join(self.output_dir, self.name, self.name+'.clustering.k_%d.dt_%s.png'),
'gene_spectra_score': os.path.join(self.output_dir, self.name, 'cnmf_tmp', self.name+'.gene_spectra_score.k_%d.dt_%s.df.npz'),
'gene_spectra_score__txt': os.path.join(self.output_dir, self.name, self.name+'.gene_spectra_score.k_%d.dt_%s.txt'),
'gene_spectra_tpm': os.path.join(self.output_dir, self.name, 'cnmf_tmp', self.name+'.gene_spectra_tpm.k_%d.dt_%s.df.npz'),
'gene_spectra_tpm__txt': os.path.join(self.output_dir, self.name, self.name+'.gene_spectra_tpm.k_%d.dt_%s.txt'),
'k_selection_plot' : os.path.join(self.output_dir, self.name, self.name+'.k_selection.png'),
'k_selection_stats' : os.path.join(self.output_dir, self.name, self.name+'.k_selection_stats.df.npz'),
}
def get_norm_counts(self, counts, tpm,
high_variance_genes_filter = None,
num_highvar_genes = None
):
"""
Parameters
----------
counts : anndata.AnnData
Scanpy AnnData object (cells x genes) containing raw counts. Filtered such that
no genes or cells with 0 counts
tpm : anndata.AnnData
Scanpy AnnData object (cells x genes) containing tpm normalized data matching
counts
high_variance_genes_filter : np.array, optional (default=None)
A pre-specified list of genes considered to be high-variance.
Only these genes will be used during factorization of the counts matrix.
Must match the .var index of counts and tpm.
If set to None, high-variance genes will be automatically computed, using the
parameters below.
num_highvar_genes : int, optional (default=None)
Instead of providing an array of high-variance genes, identify this many most overdispersed genes
for filtering
Returns
-------
normcounts : anndata.AnnData, shape (cells, num_highvar_genes)
A counts matrix containing only the high variance genes and with columns (genes)normalized to unit
variance
"""
if high_variance_genes_filter is None:
## Get list of high-var genes if one wasn't provided
if sp.issparse(tpm.X):
(gene_counts_stats, gene_fano_params) = get_highvar_genes_sparse(tpm.X, numgenes=num_highvar_genes)
else:
(gene_counts_stats, gene_fano_params) = get_highvar_genes(np.array(tpm.X), numgenes=num_highvar_genes)
high_variance_genes_filter = list(tpm.var.index[gene_counts_stats.high_var.values])
## Subset out high-variance genes
norm_counts = counts[:, high_variance_genes_filter]
## Scale genes to unit variance
if sp.issparse(tpm.X):
sc.pp.scale(norm_counts, zero_center=False)
if np.isnan(norm_counts.X.data).sum() > 0:
print('Warning NaNs in normalized counts matrix')
else:
norm_counts.X /= norm_counts.X.std(axis=0, ddof=1)
if np.isnan(norm_counts.X).sum().sum() > 0:
print('Warning NaNs in normalized counts matrix')
## Save a \n-delimited list of the high-variance genes used for factorization
        with open(self.paths['nmf_genes_list'], 'w') as fp:
            fp.write('\n'.join(high_variance_genes_filter))
## Check for any cells that have 0 counts of the overdispersed genes
zerocells = norm_counts.X.sum(axis=1)==0
if zerocells.sum()>0:
examples = norm_counts.obs.index[zerocells]
print('Warning: %d cells have zero counts of overdispersed genes. E.g. %s' % (zerocells.sum(), examples[0]))
print('Consensus step may not run when this is the case')
return(norm_counts)
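    # Illustrative sketch (hypothetical AnnData inputs `counts_adata` and
    # `tpm_adata` prepared elsewhere): keep the 2000 most overdispersed genes
    # and save the variance-scaled matrix used for factorization.
    #
    #   norm_counts = cnmf_obj.get_norm_counts(counts_adata, tpm_adata,
    #                                          num_highvar_genes=2000)
    #   cnmf_obj.save_norm_counts(norm_counts)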
def save_norm_counts(self, norm_counts):
self._initialize_dirs()
sc.write(self.paths['normalized_counts'], norm_counts)
def get_nmf_iter_params(self, ks, n_iter = 100,
random_state_seed = None,
beta_loss = 'kullback-leibler'):
"""
Create a DataFrame with parameters for NMF iterations.
Parameters
----------
ks : integer, or list-like.
Number of topics (components) for factorization.
Several values can be specified at the same time, which will be run independently.
        n_iter : integer, optional (default=100)
Number of iterations for factorization. If several ``k`` are specified, this many
iterations will be run for each value of ``k``.
random_state_seed : int or None, optional (default=None)
Seed for sklearn random state.
"""
if type(ks) is int:
ks = [ks]
# Remove any repeated k values, and order.
k_list = sorted(set(list(ks)))
n_runs = len(ks)* n_iter
np.random.seed(seed=random_state_seed)
nmf_seeds = np.random.randint(low=1, high=(2**32)-1, size=n_runs)
replicate_params = []
for i, (k, r) in enumerate(itertools.product(k_list, range(n_iter))):
replicate_params.append([k, r, nmf_seeds[i]])
replicate_params = pd.DataFrame(replicate_params, columns = ['n_components', 'iter', 'nmf_seed'])
_nmf_kwargs = dict(
alpha=0.0,
l1_ratio=0.0,
beta_loss=beta_loss,
solver='mu',
tol=1e-4,
max_iter=400,
regularization=None,
init='random'
)
## Coordinate descent is faster than multiplicative update but only works for frobenius
if beta_loss == 'frobenius':
_nmf_kwargs['solver'] = 'cd'
return(replicate_params, _nmf_kwargs)
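    # Illustrative sketch: with ks=[5, 6, 7] and n_iter=10 the replicate grid
    # has 30 rows, each holding an (n_components, iter, nmf_seed) triple.
    #
    #   replicate_params, run_params = cnmf_obj.get_nmf_iter_params(
    #       ks=[5, 6, 7], n_iter=10, random_state_seed=14)
    #   cnmf_obj.save_nmf_iter_params(replicate_params, run_params)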
def save_nmf_iter_params(self, replicate_params, run_params):
self._initialize_dirs()
save_df_to_npz(replicate_params, self.paths['nmf_replicate_parameters'])
with open(self.paths['nmf_run_parameters'], 'w') as F:
yaml.dump(run_params, F)
def _nmf(self, X, nmf_kwargs):
"""
Parameters
----------
X : pandas.DataFrame,
Normalized counts dataFrame to be factorized.
nmf_kwargs : dict,
Arguments to be passed to ``non_negative_factorization``
"""
(usages, spectra, niter) = non_negative_factorization(X, **nmf_kwargs)
return(spectra, usages)
def run_nmf(self,
worker_i=1, total_workers=1,
):
"""
Iteratively run NMF with prespecified parameters.
Use the `worker_i` and `total_workers` parameters for parallelization.
Generic kwargs for NMF are loaded from self.paths['nmf_run_parameters'], defaults below::
``non_negative_factorization`` default arguments:
alpha=0.0
l1_ratio=0.0
beta_loss='kullback-leibler'
solver='mu'
tol=1e-4,
            max_iter=400
regularization=None
init='random'
random_state, n_components are both set by the prespecified self.paths['nmf_replicate_parameters'].
Parameters
----------
norm_counts : pandas.DataFrame,
Normalized counts dataFrame to be factorized.
(Output of ``normalize_counts``)
run_params : pandas.DataFrame,
Parameters for NMF iterations.
(Output of ``prepare_nmf_iter_params``)
"""
self._initialize_dirs()
run_params = load_df_from_npz(self.paths['nmf_replicate_parameters'])
norm_counts = sc.read(self.paths['normalized_counts'])
_nmf_kwargs = yaml.load(open(self.paths['nmf_run_parameters']), Loader=yaml.FullLoader)
jobs_for_this_worker = worker_filter(range(len(run_params)), worker_i, total_workers)
for idx in jobs_for_this_worker:
p = run_params.iloc[idx, :]
print('[Worker %d]. Starting task %d.' % (worker_i, idx))
_nmf_kwargs['random_state'] = p['nmf_seed']
_nmf_kwargs['n_components'] = p['n_components']
(spectra, usages) = self._nmf(norm_counts.X, _nmf_kwargs)
spectra = pd.DataFrame(spectra,
index=np.arange(1, _nmf_kwargs['n_components']+1),
columns=norm_counts.var.index)
save_df_to_npz(spectra, self.paths['iter_spectra'] % (p['n_components'], p['iter']))
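    # Illustrative sketch: `worker_i`/`total_workers` split the replicate list
    # round-robin, so three parallel processes (e.g. separate cluster jobs)
    # could cover every factorization with
    #
    #   cnmf_obj.run_nmf(worker_i=0, total_workers=3)
    #   cnmf_obj.run_nmf(worker_i=1, total_workers=3)
    #   cnmf_obj.run_nmf(worker_i=2, total_workers=3)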
def combine_nmf(self, k, remove_individual_iterations=False):
run_params = load_df_from_npz(self.paths['nmf_replicate_parameters'])
print('Combining factorizations for k=%d.'%k)
self._initialize_dirs()
combined_spectra = None
n_iter = sum(run_params.n_components==k)
run_params_subset = run_params[run_params.n_components==k].sort_values('iter')
spectra_labels = []
for i,p in run_params_subset.iterrows():
spectra = load_df_from_npz(self.paths['iter_spectra'] % (p['n_components'], p['iter']))
if combined_spectra is None:
combined_spectra = np.zeros((n_iter, k, spectra.shape[1]))
combined_spectra[p['iter'], :, :] = spectra.values
for t in range(k):
spectra_labels.append('iter%d_topic%d'%(p['iter'], t+1))
combined_spectra = combined_spectra.reshape(-1, combined_spectra.shape[-1])
combined_spectra = pd.DataFrame(combined_spectra, columns=spectra.columns, index=spectra_labels)
save_df_to_npz(combined_spectra, self.paths['merged_spectra']%k)
return combined_spectra
def consensus(self, k, density_threshold_str='0.5', local_neighborhood_size = 0.30,show_clustering = False,
skip_density_and_return_after_stats = False, close_clustergram_fig=True):
merged_spectra = load_df_from_npz(self.paths['merged_spectra']%k)
norm_counts = sc.read(self.paths['normalized_counts'])
if skip_density_and_return_after_stats:
density_threshold_str = '2'
density_threshold_repl = density_threshold_str.replace('.', '_')
density_threshold = float(density_threshold_str)
n_neighbors = int(local_neighborhood_size * merged_spectra.shape[0]/k)
        # Rescale each topic (spectra row) to unit L2 norm.
l2_spectra = (merged_spectra.T/np.sqrt((merged_spectra**2).sum(axis=1))).T
if not skip_density_and_return_after_stats:
# Compute the local density matrix (if not previously cached)
topics_dist = None
if os.path.isfile(self.paths['local_density_cache'] % k):
local_density = load_df_from_npz(self.paths['local_density_cache'] % k)
else:
# first find the full distance matrix
topics_dist = squareform(fast_euclidean(l2_spectra.values))
# partition based on the first n neighbors
partitioning_order = np.argpartition(topics_dist, n_neighbors+1)[:, :n_neighbors+1]
# find the mean over those n_neighbors (excluding self, which has a distance of 0)
distance_to_nearest_neighbors = topics_dist[np.arange(topics_dist.shape[0])[:, None], partitioning_order]
local_density = pd.DataFrame(distance_to_nearest_neighbors.sum(1)/(n_neighbors),
columns=['local_density'],
index=l2_spectra.index)
save_df_to_npz(local_density, self.paths['local_density_cache'] % k)
del(partitioning_order)
del(distance_to_nearest_neighbors)
density_filter = local_density.iloc[:, 0] < density_threshold
l2_spectra = l2_spectra.loc[density_filter, :]
kmeans_model = KMeans(n_clusters=k, n_init=10, random_state=1)
kmeans_model.fit(l2_spectra)
kmeans_cluster_labels = pd.Series(kmeans_model.labels_+1, index=l2_spectra.index)
# Find median usage for each gene across cluster
median_spectra = l2_spectra.groupby(kmeans_cluster_labels).median()
# Normalize median spectra to probability distributions.
median_spectra = (median_spectra.T/median_spectra.sum(1)).T
# Compute the silhouette score
stability = silhouette_score(l2_spectra.values, kmeans_cluster_labels, metric='euclidean')
# Obtain the reconstructed count matrix by re-fitting the usage matrix and computing the dot product: usage.dot(spectra)
refit_nmf_kwargs = yaml.load(open(self.paths['nmf_run_parameters']), Loader=yaml.FullLoader)
refit_nmf_kwargs.update(dict(
n_components = k,
H = median_spectra.values,
update_H = False
))
_, rf_usages = self._nmf(norm_counts.X,
nmf_kwargs=refit_nmf_kwargs)
rf_usages = pd.DataFrame(rf_usages, index=norm_counts.obs.index, columns=median_spectra.index)
rf_pred_norm_counts = rf_usages.dot(median_spectra)
# Compute prediction error as a frobenius norm
if sp.issparse(norm_counts.X):
prediction_error = ((norm_counts.X.todense() - rf_pred_norm_counts)**2).sum().sum()
else:
prediction_error = ((norm_counts.X - rf_pred_norm_counts)**2).sum().sum()
consensus_stats = pd.DataFrame([k, density_threshold, stability, prediction_error],
index = ['k', 'local_density_threshold', 'stability', 'prediction_error'],
columns = ['stats'])
if skip_density_and_return_after_stats:
return consensus_stats
save_df_to_npz(median_spectra, self.paths['consensus_spectra']%(k, density_threshold_repl))
save_df_to_npz(rf_usages, self.paths['consensus_usages']%(k, density_threshold_repl))
save_df_to_npz(consensus_stats, self.paths['consensus_stats']%(k, density_threshold_repl))
save_df_to_text(median_spectra, self.paths['consensus_spectra__txt']%(k, density_threshold_repl))
save_df_to_text(rf_usages, self.paths['consensus_usages__txt']%(k, density_threshold_repl))
        # Compute gene scores for each GEP by regressing the Z-scored TPM of each gene on the consensus usages
tpm = sc.read(self.paths['tpm'])
tpm_stats = load_df_from_npz(self.paths['tpm_stats'])
if sp.issparse(tpm.X):
norm_tpm = (np.array(tpm.X.todense()) - tpm_stats['__mean'].values) / tpm_stats['__std'].values
else:
norm_tpm = (tpm.X - tpm_stats['__mean'].values) / tpm_stats['__std'].values
usage_coef = fast_ols_all_cols(rf_usages.values, norm_tpm)
usage_coef = pd.DataFrame(usage_coef, index=rf_usages.columns, columns=tpm.var.index)
save_df_to_npz(usage_coef, self.paths['gene_spectra_score']%(k, density_threshold_repl))
save_df_to_text(usage_coef, self.paths['gene_spectra_score__txt']%(k, density_threshold_repl))
# Convert spectra to TPM units, and obtain results for all genes by running last step of NMF
# with usages fixed and TPM as the input matrix
norm_usages = rf_usages.div(rf_usages.sum(axis=1), axis=0)
refit_nmf_kwargs.update(dict(
H = norm_usages.T.values,
))
_, spectra_tpm = self._nmf(tpm.X.T, nmf_kwargs=refit_nmf_kwargs)
spectra_tpm = pd.DataFrame(spectra_tpm.T, index=rf_usages.columns, columns=tpm.var.index)
save_df_to_npz(spectra_tpm, self.paths['gene_spectra_tpm']%(k, density_threshold_repl))
save_df_to_text(spectra_tpm, self.paths['gene_spectra_tpm__txt']%(k, density_threshold_repl))
if show_clustering:
if topics_dist is None:
topics_dist = squareform(fast_euclidean(l2_spectra.values))
# (l2_spectra was already filtered using the density filter)
else:
# (but the previously computed topics_dist was not!)
topics_dist = topics_dist[density_filter.values, :][:, density_filter.values]
spectra_order = []
for cl in sorted(set(kmeans_cluster_labels)):
cl_filter = kmeans_cluster_labels==cl
if cl_filter.sum() > 1:
cl_dist = squareform(topics_dist[cl_filter, :][:, cl_filter])
cl_dist[cl_dist < 0] = 0 #Rarely get floating point arithmetic issues
cl_link = linkage(cl_dist, 'average')
cl_leaves_order = leaves_list(cl_link)
spectra_order += list(np.where(cl_filter)[0][cl_leaves_order])
else:
## Corner case where a component only has one element
spectra_order += list(np.where(cl_filter)[0])
from matplotlib import gridspec
import matplotlib.pyplot as plt
width_ratios = [0.5, 9, 0.5, 4, 1]
height_ratios = [0.5, 9]
fig = plt.figure(figsize=(sum(width_ratios), sum(height_ratios)))
gs = gridspec.GridSpec(len(height_ratios), len(width_ratios), fig,
0.01, 0.01, 0.98, 0.98,
height_ratios=height_ratios,
width_ratios=width_ratios,
wspace=0, hspace=0)
dist_ax = fig.add_subplot(gs[1,1], xscale='linear', yscale='linear',
xticks=[], yticks=[],xlabel='', ylabel='',
frameon=True)
D = topics_dist[spectra_order, :][:, spectra_order]
dist_im = dist_ax.imshow(D, interpolation='none', cmap='viridis', aspect='auto',
rasterized=True)
left_ax = fig.add_subplot(gs[1,0], xscale='linear', yscale='linear', xticks=[], yticks=[],
xlabel='', ylabel='', frameon=True)
left_ax.imshow(kmeans_cluster_labels.values[spectra_order].reshape(-1, 1),
interpolation='none', cmap='Spectral', aspect='auto',
rasterized=True)
top_ax = fig.add_subplot(gs[0,1], xscale='linear', yscale='linear', xticks=[], yticks=[],
xlabel='', ylabel='', frameon=True)
top_ax.imshow(kmeans_cluster_labels.values[spectra_order].reshape(1, -1),
interpolation='none', cmap='Spectral', aspect='auto',
rasterized=True)
hist_gs = gridspec.GridSpecFromSubplotSpec(3, 1, subplot_spec=gs[1, 3],
wspace=0, hspace=0)
hist_ax = fig.add_subplot(hist_gs[0,0], xscale='linear', yscale='linear',
xlabel='', ylabel='', frameon=True, title='Local density histogram')
hist_ax.hist(local_density.values, bins=np.linspace(0, 1, 50))
hist_ax.yaxis.tick_right()
xlim = hist_ax.get_xlim()
ylim = hist_ax.get_ylim()
if density_threshold < xlim[1]:
hist_ax.axvline(density_threshold, linestyle='--', color='k')
hist_ax.text(density_threshold + 0.02, ylim[1] * 0.95, 'filtering\nthreshold\n\n', va='top')
hist_ax.set_xlim(xlim)
hist_ax.set_xlabel('Mean distance to k nearest neighbors\n\n%d/%d (%.0f%%) spectra above threshold\nwere removed prior to clustering'%(sum(~density_filter), len(density_filter), 100*(~density_filter).mean()))
fig.savefig(self.paths['clustering_plot']%(k, density_threshold_repl), dpi=250)
if close_clustergram_fig:
plt.close(fig)
def k_selection_plot(self, close_fig=True):
'''
Borrowed from <NAME>. 2013 Deciphering Mutational Signatures
publication in Cell Reports
'''
run_params = load_df_from_npz(self.paths['nmf_replicate_parameters'])
stats = []
for k in sorted(set(run_params.n_components)):
stats.append(self.consensus(k, skip_density_and_return_after_stats=True).stats)
stats = | pd.DataFrame(stats) | pandas.DataFrame |
from collections import OrderedDict
import numpy as np
from numpy import nan, array
import pandas as pd
import pytest
from .conftest import (
assert_series_equal, assert_frame_equal, fail_on_pvlib_version)
from numpy.testing import assert_allclose
import unittest.mock as mock
from pvlib import inverter, pvsystem
from pvlib import atmosphere
from pvlib import iam as _iam
from pvlib import irradiance
from pvlib.location import Location
from pvlib import temperature
from pvlib._deprecation import pvlibDeprecationWarning
@pytest.mark.parametrize('iam_model,model_params', [
('ashrae', {'b': 0.05}),
('physical', {'K': 4, 'L': 0.002, 'n': 1.526}),
('martin_ruiz', {'a_r': 0.16}),
])
def test_PVSystem_get_iam(mocker, iam_model, model_params):
m = mocker.spy(_iam, iam_model)
system = pvsystem.PVSystem(module_parameters=model_params)
thetas = 1
iam = system.get_iam(thetas, iam_model=iam_model)
m.assert_called_with(thetas, **model_params)
assert iam < 1.
def test_PVSystem_multi_array_get_iam():
model_params = {'b': 0.05}
system = pvsystem.PVSystem(
arrays=[pvsystem.Array(module_parameters=model_params),
pvsystem.Array(module_parameters=model_params)]
)
iam = system.get_iam((1, 5), iam_model='ashrae')
assert len(iam) == 2
assert iam[0] != iam[1]
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
system.get_iam((1,), iam_model='ashrae')
def test_PVSystem_get_iam_sapm(sapm_module_params, mocker):
system = pvsystem.PVSystem(module_parameters=sapm_module_params)
mocker.spy(_iam, 'sapm')
aoi = 0
out = system.get_iam(aoi, 'sapm')
_iam.sapm.assert_called_once_with(aoi, sapm_module_params)
assert_allclose(out, 1.0, atol=0.01)
def test_PVSystem_get_iam_interp(sapm_module_params, mocker):
system = pvsystem.PVSystem(module_parameters=sapm_module_params)
with pytest.raises(ValueError):
system.get_iam(45, iam_model='interp')
def test__normalize_sam_product_names():
BAD_NAMES = [' -.()[]:+/",', 'Module[1]']
NORM_NAMES = ['____________', 'Module_1_']
norm_names = pvsystem._normalize_sam_product_names(BAD_NAMES)
assert list(norm_names) == NORM_NAMES
BAD_NAMES = ['Module[1]', 'Module(1)']
NORM_NAMES = ['Module_1_', 'Module_1_']
with pytest.warns(UserWarning):
norm_names = pvsystem._normalize_sam_product_names(BAD_NAMES)
assert list(norm_names) == NORM_NAMES
BAD_NAMES = ['Module[1]', 'Module[1]']
NORM_NAMES = ['Module_1_', 'Module_1_']
with pytest.warns(UserWarning):
norm_names = pvsystem._normalize_sam_product_names(BAD_NAMES)
assert list(norm_names) == NORM_NAMES
def test_PVSystem_get_iam_invalid(sapm_module_params, mocker):
system = pvsystem.PVSystem(module_parameters=sapm_module_params)
with pytest.raises(ValueError):
system.get_iam(45, iam_model='not_a_model')
def test_retrieve_sam_raise_no_parameters():
"""
Raise an exception if no parameters are provided to `retrieve_sam()`.
"""
with pytest.raises(ValueError) as error:
pvsystem.retrieve_sam()
assert 'A name or path must be provided!' == str(error.value)
def test_retrieve_sam_cecmod():
"""
Test the expected data is retrieved from the CEC module database. In
particular, check for a known module in the database and check for the
expected keys for that module.
"""
data = pvsystem.retrieve_sam('cecmod')
keys = [
'BIPV',
'Date',
'T_NOCT',
'A_c',
'N_s',
'I_sc_ref',
'V_oc_ref',
'I_mp_ref',
'V_mp_ref',
'alpha_sc',
'beta_oc',
'a_ref',
'I_L_ref',
'I_o_ref',
'R_s',
'R_sh_ref',
'Adjust',
'gamma_r',
'Version',
'STC',
'PTC',
'Technology',
'Bifacial',
'Length',
'Width',
]
module = 'Itek_Energy_LLC_iT_300_HE'
assert module in data
assert set(data[module].keys()) == set(keys)
def test_retrieve_sam_cecinverter():
"""
Test the expected data is retrieved from the CEC inverter database. In
particular, check for a known inverter in the database and check for the
expected keys for that inverter.
"""
data = pvsystem.retrieve_sam('cecinverter')
keys = [
'Vac',
'Paco',
'Pdco',
'Vdco',
'Pso',
'C0',
'C1',
'C2',
'C3',
'Pnt',
'Vdcmax',
'Idcmax',
'Mppt_low',
'Mppt_high',
'CEC_Date',
'CEC_Type',
]
inverter = 'Yaskawa_Solectria_Solar__PVI_5300_208__208V_'
assert inverter in data
assert set(data[inverter].keys()) == set(keys)
def test_sapm(sapm_module_params):
times = pd.date_range(start='2015-01-01', periods=5, freq='12H')
effective_irradiance = pd.Series([-1000, 500, 1100, np.nan, 1000],
index=times)
temp_cell = pd.Series([10, 25, 50, 25, np.nan], index=times)
out = pvsystem.sapm(effective_irradiance, temp_cell, sapm_module_params)
expected = pd.DataFrame(np.array(
[[ -5.0608322 , -4.65037767, nan, nan,
nan, -4.91119927, -4.15367716],
[ 2.545575 , 2.28773882, 56.86182059, 47.21121608,
108.00693168, 2.48357383, 1.71782772],
[ 5.65584763, 5.01709903, 54.1943277 , 42.51861718,
213.32011294, 5.52987899, 3.48660728],
[ nan, nan, nan, nan,
nan, nan, nan],
[ nan, nan, nan, nan,
nan, nan, nan]]),
columns=['i_sc', 'i_mp', 'v_oc', 'v_mp', 'p_mp', 'i_x', 'i_xx'],
index=times)
assert_frame_equal(out, expected, check_less_precise=4)
out = pvsystem.sapm(1000, 25, sapm_module_params)
expected = OrderedDict()
expected['i_sc'] = 5.09115
expected['i_mp'] = 4.5462909092579995
expected['v_oc'] = 59.260800000000003
expected['v_mp'] = 48.315600000000003
expected['p_mp'] = 219.65677305534581
expected['i_x'] = 4.9759899999999995
expected['i_xx'] = 3.1880204359100004
for k, v in expected.items():
assert_allclose(out[k], v, atol=1e-4)
# just make sure it works with Series input
pvsystem.sapm(effective_irradiance, temp_cell,
pd.Series(sapm_module_params))
def test_PVSystem_sapm(sapm_module_params, mocker):
mocker.spy(pvsystem, 'sapm')
system = pvsystem.PVSystem(module_parameters=sapm_module_params)
effective_irradiance = 500
temp_cell = 25
out = system.sapm(effective_irradiance, temp_cell)
pvsystem.sapm.assert_called_once_with(effective_irradiance, temp_cell,
sapm_module_params)
assert_allclose(out['p_mp'], 100, atol=100)
def test_PVSystem_multi_array_sapm(sapm_module_params):
system = pvsystem.PVSystem(
arrays=[pvsystem.Array(module_parameters=sapm_module_params),
pvsystem.Array(module_parameters=sapm_module_params)]
)
effective_irradiance = (100, 500)
temp_cell = (15, 25)
sapm_one, sapm_two = system.sapm(effective_irradiance, temp_cell)
assert sapm_one['p_mp'] != sapm_two['p_mp']
sapm_one_flip, sapm_two_flip = system.sapm(
(effective_irradiance[1], effective_irradiance[0]),
(temp_cell[1], temp_cell[0])
)
assert sapm_one_flip['p_mp'] == sapm_two['p_mp']
assert sapm_two_flip['p_mp'] == sapm_one['p_mp']
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
system.sapm(effective_irradiance, 10)
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
system.sapm(500, temp_cell)
@pytest.mark.parametrize('airmass,expected', [
(1.5, 1.00028714375),
(np.array([[10, np.nan]]), np.array([[0.999535, 0]])),
(pd.Series([5]), pd.Series([1.0387675]))
])
def test_sapm_spectral_loss(sapm_module_params, airmass, expected):
out = pvsystem.sapm_spectral_loss(airmass, sapm_module_params)
if isinstance(airmass, pd.Series):
assert_series_equal(out, expected, check_less_precise=4)
else:
assert_allclose(out, expected, atol=1e-4)
def test_PVSystem_sapm_spectral_loss(sapm_module_params, mocker):
mocker.spy(pvsystem, 'sapm_spectral_loss')
system = pvsystem.PVSystem(module_parameters=sapm_module_params)
airmass = 2
out = system.sapm_spectral_loss(airmass)
pvsystem.sapm_spectral_loss.assert_called_once_with(airmass,
sapm_module_params)
assert_allclose(out, 1, atol=0.5)
def test_PVSystem_multi_array_sapm_spectral_loss(sapm_module_params):
system = pvsystem.PVSystem(
arrays=[pvsystem.Array(module_parameters=sapm_module_params),
pvsystem.Array(module_parameters=sapm_module_params)]
)
loss_one, loss_two = system.sapm_spectral_loss(2)
assert loss_one == loss_two
# this test could be improved to cover all cell types.
# could remove the need for specifying spectral coefficients if we don't
# care about the return value at all
@pytest.mark.parametrize('module_parameters,module_type,coefficients', [
({'Technology': 'mc-Si'}, 'multisi', None),
({'Material': 'Multi-c-Si'}, 'multisi', None),
({'first_solar_spectral_coefficients': (
0.84, -0.03, -0.008, 0.14, 0.04, -0.002)},
None,
(0.84, -0.03, -0.008, 0.14, 0.04, -0.002))
])
def test_PVSystem_first_solar_spectral_loss(module_parameters, module_type,
coefficients, mocker):
mocker.spy(atmosphere, 'first_solar_spectral_correction')
system = pvsystem.PVSystem(module_parameters=module_parameters)
pw = 3
airmass_absolute = 3
out = system.first_solar_spectral_loss(pw, airmass_absolute)
atmosphere.first_solar_spectral_correction.assert_called_once_with(
pw, airmass_absolute, module_type, coefficients)
assert_allclose(out, 1, atol=0.5)
def test_PVSystem_multi_array_first_solar_spectral_loss():
system = pvsystem.PVSystem(
arrays=[
pvsystem.Array(
module_parameters={'Technology': 'mc-Si'},
module_type='multisi'
),
pvsystem.Array(
module_parameters={'Technology': 'mc-Si'},
module_type='multisi'
)
]
)
loss_one, loss_two = system.first_solar_spectral_loss(1, 3)
assert loss_one == loss_two
@pytest.mark.parametrize('test_input,expected', [
([1000, 100, 5, 45], 1140.0510967821877),
([np.array([np.nan, 1000, 1000]),
np.array([100, np.nan, 100]),
np.array([1.1, 1.1, 1.1]),
np.array([10, 10, 10])],
np.array([np.nan, np.nan, 1081.1574])),
([pd.Series([1000]), pd.Series([100]), pd.Series([1.1]),
pd.Series([10])],
pd.Series([1081.1574]))
])
def test_sapm_effective_irradiance(sapm_module_params, test_input, expected):
test_input.append(sapm_module_params)
out = pvsystem.sapm_effective_irradiance(*test_input)
if isinstance(test_input, pd.Series):
assert_series_equal(out, expected, check_less_precise=4)
else:
assert_allclose(out, expected, atol=1e-1)
def test_PVSystem_sapm_effective_irradiance(sapm_module_params, mocker):
system = pvsystem.PVSystem(module_parameters=sapm_module_params)
mocker.spy(pvsystem, 'sapm_effective_irradiance')
poa_direct = 900
poa_diffuse = 100
airmass_absolute = 1.5
aoi = 0
p = (sapm_module_params['A4'], sapm_module_params['A3'],
sapm_module_params['A2'], sapm_module_params['A1'],
sapm_module_params['A0'])
f1 = np.polyval(p, airmass_absolute)
expected = f1 * (poa_direct + sapm_module_params['FD'] * poa_diffuse)
out = system.sapm_effective_irradiance(
poa_direct, poa_diffuse, airmass_absolute, aoi)
pvsystem.sapm_effective_irradiance.assert_called_once_with(
poa_direct, poa_diffuse, airmass_absolute, aoi, sapm_module_params)
assert_allclose(out, expected, atol=0.1)
def test_PVSystem_multi_array_sapm_effective_irradiance(sapm_module_params):
system = pvsystem.PVSystem(
arrays=[pvsystem.Array(module_parameters=sapm_module_params),
pvsystem.Array(module_parameters=sapm_module_params)]
)
poa_direct = (500, 900)
poa_diffuse = (50, 100)
aoi = (0, 10)
airmass_absolute = 1.5
irrad_one, irrad_two = system.sapm_effective_irradiance(
poa_direct, poa_diffuse, airmass_absolute, aoi
)
assert irrad_one != irrad_two
@pytest.fixture
def two_array_system(pvsyst_module_params, cec_module_params):
"""Two-array PVSystem.
Both arrays are identical.
"""
temperature_model = temperature.TEMPERATURE_MODEL_PARAMETERS['sapm'][
'open_rack_glass_glass'
]
# Need u_v to be non-zero so wind-speed changes cell temperature
# under the pvsyst model.
temperature_model['u_v'] = 1.0
# parameter for fuentes temperature model
temperature_model['noct_installed'] = 45
# parameters for noct_sam temperature model
temperature_model['noct'] = 45.
temperature_model['module_efficiency'] = 0.2
module_params = {**pvsyst_module_params, **cec_module_params}
return pvsystem.PVSystem(
arrays=[
pvsystem.Array(
temperature_model_parameters=temperature_model,
module_parameters=module_params
),
pvsystem.Array(
temperature_model_parameters=temperature_model,
module_parameters=module_params
)
]
)
@pytest.mark.parametrize("poa_direct, poa_diffuse, aoi",
[(20, (10, 10), (20, 20)),
((20, 20), (10,), (20, 20)),
((20, 20), (10, 10), 20)])
def test_PVSystem_sapm_effective_irradiance_value_error(
poa_direct, poa_diffuse, aoi, two_array_system):
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
two_array_system.sapm_effective_irradiance(
poa_direct, poa_diffuse, 10, aoi
)
def test_PVSystem_sapm_celltemp(mocker):
a, b, deltaT = (-3.47, -0.0594, 3) # open_rack_glass_glass
temp_model_params = {'a': a, 'b': b, 'deltaT': deltaT}
system = pvsystem.PVSystem(temperature_model_parameters=temp_model_params)
mocker.spy(temperature, 'sapm_cell')
temps = 25
irrads = 1000
winds = 1
out = system.sapm_celltemp(irrads, temps, winds)
temperature.sapm_cell.assert_called_once_with(irrads, temps, winds, a, b,
deltaT)
assert_allclose(out, 57, atol=1)
def test_PVSystem_sapm_celltemp_kwargs(mocker):
temp_model_params = temperature.TEMPERATURE_MODEL_PARAMETERS['sapm'][
'open_rack_glass_glass']
system = pvsystem.PVSystem(temperature_model_parameters=temp_model_params)
mocker.spy(temperature, 'sapm_cell')
temps = 25
irrads = 1000
winds = 1
out = system.sapm_celltemp(irrads, temps, winds)
temperature.sapm_cell.assert_called_once_with(irrads, temps, winds,
temp_model_params['a'],
temp_model_params['b'],
temp_model_params['deltaT'])
assert_allclose(out, 57, atol=1)
def test_PVSystem_multi_array_sapm_celltemp_different_arrays():
temp_model_one = temperature.TEMPERATURE_MODEL_PARAMETERS['sapm'][
'open_rack_glass_glass']
temp_model_two = temperature.TEMPERATURE_MODEL_PARAMETERS['sapm'][
'close_mount_glass_glass']
system = pvsystem.PVSystem(
arrays=[pvsystem.Array(temperature_model_parameters=temp_model_one),
pvsystem.Array(temperature_model_parameters=temp_model_two)]
)
temp_one, temp_two = system.sapm_celltemp(
(1000, 1000), 25, 1
)
assert temp_one != temp_two
def test_PVSystem_pvsyst_celltemp(mocker):
parameter_set = 'insulated'
temp_model_params = temperature.TEMPERATURE_MODEL_PARAMETERS['pvsyst'][
parameter_set]
alpha_absorption = 0.85
module_efficiency = 0.17
module_parameters = {'alpha_absorption': alpha_absorption,
'module_efficiency': module_efficiency}
system = pvsystem.PVSystem(module_parameters=module_parameters,
temperature_model_parameters=temp_model_params)
mocker.spy(temperature, 'pvsyst_cell')
irrad = 800
temp = 45
wind = 0.5
out = system.pvsyst_celltemp(irrad, temp, wind_speed=wind)
temperature.pvsyst_cell.assert_called_once_with(
irrad, temp, wind_speed=wind, u_c=temp_model_params['u_c'],
u_v=temp_model_params['u_v'], module_efficiency=module_efficiency,
alpha_absorption=alpha_absorption)
assert (out < 90) and (out > 70)
def test_PVSystem_faiman_celltemp(mocker):
u0, u1 = 25.0, 6.84 # default values
temp_model_params = {'u0': u0, 'u1': u1}
system = pvsystem.PVSystem(temperature_model_parameters=temp_model_params)
mocker.spy(temperature, 'faiman')
temps = 25
irrads = 1000
winds = 1
out = system.faiman_celltemp(irrads, temps, winds)
temperature.faiman.assert_called_once_with(irrads, temps, winds, u0, u1)
assert_allclose(out, 56.4, atol=1)
def test_PVSystem_noct_celltemp(mocker):
poa_global, temp_air, wind_speed, noct, module_efficiency = (
1000., 25., 1., 45., 0.2)
expected = 55.230790492
temp_model_params = {'noct': noct, 'module_efficiency': module_efficiency}
system = pvsystem.PVSystem(temperature_model_parameters=temp_model_params)
mocker.spy(temperature, 'noct_sam')
out = system.noct_sam_celltemp(poa_global, temp_air, wind_speed)
temperature.noct_sam.assert_called_once_with(
poa_global, temp_air, wind_speed, effective_irradiance=None, noct=noct,
module_efficiency=module_efficiency)
assert_allclose(out, expected)
    # different input types
out = system.noct_sam_celltemp(np.array(poa_global), np.array(temp_air),
np.array(wind_speed))
assert_allclose(out, expected)
dr = pd.date_range(start='2020-01-01 12:00:00', end='2020-01-01 13:00:00',
freq='1H')
out = system.noct_sam_celltemp(pd.Series(index=dr, data=poa_global),
pd.Series(index=dr, data=temp_air),
pd.Series(index=dr, data=wind_speed))
assert_series_equal(out, pd.Series(index=dr, data=expected))
# now use optional arguments
temp_model_params.update({'transmittance_absorptance': 0.8,
'array_height': 2,
'mount_standoff': 2.0})
expected = 60.477703576
system = pvsystem.PVSystem(temperature_model_parameters=temp_model_params)
out = system.noct_sam_celltemp(poa_global, temp_air, wind_speed,
effective_irradiance=1100.)
assert_allclose(out, expected)
def test_PVSystem_noct_celltemp_error():
poa_global, temp_air, wind_speed, module_efficiency = (1000., 25., 1., 0.2)
temp_model_params = {'module_efficiency': module_efficiency}
system = pvsystem.PVSystem(temperature_model_parameters=temp_model_params)
with pytest.raises(KeyError):
system.noct_sam_celltemp(poa_global, temp_air, wind_speed)
@pytest.mark.parametrize("celltemp",
[pvsystem.PVSystem.faiman_celltemp,
pvsystem.PVSystem.pvsyst_celltemp,
pvsystem.PVSystem.sapm_celltemp,
pvsystem.PVSystem.fuentes_celltemp,
pvsystem.PVSystem.noct_sam_celltemp])
def test_PVSystem_multi_array_celltemp_functions(celltemp, two_array_system):
times = pd.date_range(start='2020-08-25 11:00', freq='H', periods=3)
irrad_one = pd.Series(1000, index=times)
irrad_two = pd.Series(500, index=times)
temp_air = pd.Series(25, index=times)
wind_speed = pd.Series(1, index=times)
temp_one, temp_two = celltemp(
two_array_system, (irrad_one, irrad_two), temp_air, wind_speed)
assert (temp_one != temp_two).all()
@pytest.mark.parametrize("celltemp",
[pvsystem.PVSystem.faiman_celltemp,
pvsystem.PVSystem.pvsyst_celltemp,
pvsystem.PVSystem.sapm_celltemp,
pvsystem.PVSystem.fuentes_celltemp,
pvsystem.PVSystem.noct_sam_celltemp])
def test_PVSystem_multi_array_celltemp_multi_temp(celltemp, two_array_system):
times = pd.date_range(start='2020-08-25 11:00', freq='H', periods=3)
irrad = pd.Series(1000, index=times)
temp_air_one = pd.Series(25, index=times)
temp_air_two = pd.Series(5, index=times)
wind_speed = pd.Series(1, index=times)
temp_one, temp_two = celltemp(
two_array_system,
(irrad, irrad),
(temp_air_one, temp_air_two),
wind_speed
)
assert (temp_one != temp_two).all()
    temp_one_switch, temp_two_switch = celltemp(
two_array_system,
(irrad, irrad),
(temp_air_two, temp_air_one),
wind_speed
)
assert_series_equal(temp_one, temp_two_switch)
    assert_series_equal(temp_two, temp_one_switch)
@pytest.mark.parametrize("celltemp",
[pvsystem.PVSystem.faiman_celltemp,
pvsystem.PVSystem.pvsyst_celltemp,
pvsystem.PVSystem.sapm_celltemp,
pvsystem.PVSystem.fuentes_celltemp,
pvsystem.PVSystem.noct_sam_celltemp])
def test_PVSystem_multi_array_celltemp_multi_wind(celltemp, two_array_system):
times = pd.date_range(start='2020-08-25 11:00', freq='H', periods=3)
irrad = pd.Series(1000, index=times)
temp_air = pd.Series(25, index=times)
wind_speed_one = pd.Series(1, index=times)
wind_speed_two = pd.Series(5, index=times)
temp_one, temp_two = celltemp(
two_array_system,
(irrad, irrad),
temp_air,
(wind_speed_one, wind_speed_two)
)
assert (temp_one != temp_two).all()
    temp_one_switch, temp_two_switch = celltemp(
two_array_system,
(irrad, irrad),
temp_air,
(wind_speed_two, wind_speed_one)
)
assert_series_equal(temp_one, temp_two_switch)
    assert_series_equal(temp_two, temp_one_switch)
@pytest.mark.parametrize("celltemp",
[pvsystem.PVSystem.faiman_celltemp,
pvsystem.PVSystem.pvsyst_celltemp,
pvsystem.PVSystem.sapm_celltemp,
pvsystem.PVSystem.fuentes_celltemp,
pvsystem.PVSystem.noct_sam_celltemp])
def test_PVSystem_multi_array_celltemp_temp_too_short(
celltemp, two_array_system):
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
celltemp(two_array_system, (1000, 1000), (1,), 1)
@pytest.mark.parametrize("celltemp",
[pvsystem.PVSystem.faiman_celltemp,
pvsystem.PVSystem.pvsyst_celltemp,
pvsystem.PVSystem.sapm_celltemp,
pvsystem.PVSystem.fuentes_celltemp,
pvsystem.PVSystem.noct_sam_celltemp])
def test_PVSystem_multi_array_celltemp_temp_too_long(
celltemp, two_array_system):
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
celltemp(two_array_system, (1000, 1000), (1, 1, 1), 1)
@pytest.mark.parametrize("celltemp",
[pvsystem.PVSystem.faiman_celltemp,
pvsystem.PVSystem.pvsyst_celltemp,
pvsystem.PVSystem.sapm_celltemp,
pvsystem.PVSystem.fuentes_celltemp,
pvsystem.PVSystem.noct_sam_celltemp])
def test_PVSystem_multi_array_celltemp_wind_too_short(
celltemp, two_array_system):
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
celltemp(two_array_system, (1000, 1000), 25, (1,))
@pytest.mark.parametrize("celltemp",
[pvsystem.PVSystem.faiman_celltemp,
pvsystem.PVSystem.pvsyst_celltemp,
pvsystem.PVSystem.sapm_celltemp,
pvsystem.PVSystem.fuentes_celltemp,
pvsystem.PVSystem.noct_sam_celltemp])
def test_PVSystem_multi_array_celltemp_wind_too_long(
celltemp, two_array_system):
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
celltemp(two_array_system, (1000, 1000), 25, (1, 1, 1))
@pytest.mark.parametrize("celltemp",
[pvsystem.PVSystem.faiman_celltemp,
pvsystem.PVSystem.pvsyst_celltemp,
pvsystem.PVSystem.sapm_celltemp,
pvsystem.PVSystem.fuentes_celltemp,
pvsystem.PVSystem.noct_sam_celltemp])
def test_PVSystem_multi_array_celltemp_poa_length_mismatch(
celltemp, two_array_system):
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
celltemp(two_array_system, 1000, 25, 1)
def test_PVSystem_fuentes_celltemp(mocker):
noct_installed = 45
temp_model_params = {'noct_installed': noct_installed}
system = pvsystem.PVSystem(temperature_model_parameters=temp_model_params)
spy = mocker.spy(temperature, 'fuentes')
index = pd.date_range('2019-01-01 11:00', freq='h', periods=3)
temps = pd.Series(25, index)
irrads = pd.Series(1000, index)
winds = pd.Series(1, index)
out = system.fuentes_celltemp(irrads, temps, winds)
assert_series_equal(spy.call_args[0][0], irrads)
assert_series_equal(spy.call_args[0][1], temps)
assert_series_equal(spy.call_args[0][2], winds)
assert spy.call_args[1]['noct_installed'] == noct_installed
assert_series_equal(out, pd.Series([52.85, 55.85, 55.85], index,
name='tmod'))
def test_PVSystem_fuentes_celltemp_override(mocker):
# test that the surface_tilt value in the cell temp calculation can be
# overridden but defaults to the surface_tilt attribute of the PVSystem
spy = mocker.spy(temperature, 'fuentes')
noct_installed = 45
index = pd.date_range('2019-01-01 11:00', freq='h', periods=3)
temps = pd.Series(25, index)
irrads = pd.Series(1000, index)
winds = pd.Series(1, index)
# uses default value
temp_model_params = {'noct_installed': noct_installed}
system = pvsystem.PVSystem(temperature_model_parameters=temp_model_params,
surface_tilt=20)
system.fuentes_celltemp(irrads, temps, winds)
assert spy.call_args[1]['surface_tilt'] == 20
# can be overridden
temp_model_params = {'noct_installed': noct_installed, 'surface_tilt': 30}
system = pvsystem.PVSystem(temperature_model_parameters=temp_model_params,
surface_tilt=20)
system.fuentes_celltemp(irrads, temps, winds)
assert spy.call_args[1]['surface_tilt'] == 30
def test_Array__infer_temperature_model_params():
array = pvsystem.Array(module_parameters={},
racking_model='open_rack',
module_type='glass_polymer')
expected = temperature.TEMPERATURE_MODEL_PARAMETERS[
'sapm']['open_rack_glass_polymer']
assert expected == array._infer_temperature_model_params()
array = pvsystem.Array(module_parameters={},
racking_model='freestanding',
module_type='glass_polymer')
expected = temperature.TEMPERATURE_MODEL_PARAMETERS[
'pvsyst']['freestanding']
assert expected == array._infer_temperature_model_params()
array = pvsystem.Array(module_parameters={},
racking_model='insulated',
module_type=None)
expected = temperature.TEMPERATURE_MODEL_PARAMETERS[
'pvsyst']['insulated']
assert expected == array._infer_temperature_model_params()
def test_Array__infer_cell_type():
array = pvsystem.Array(module_parameters={})
assert array._infer_cell_type() is None
def test_calcparams_desoto(cec_module_params):
times = pd.date_range(start='2015-01-01', periods=3, freq='12H')
effective_irradiance = pd.Series([0.0, 800.0, 800.0], index=times)
temp_cell = pd.Series([25, 25, 50], index=times)
IL, I0, Rs, Rsh, nNsVth = pvsystem.calcparams_desoto(
effective_irradiance,
temp_cell,
alpha_sc=cec_module_params['alpha_sc'],
a_ref=cec_module_params['a_ref'],
I_L_ref=cec_module_params['I_L_ref'],
I_o_ref=cec_module_params['I_o_ref'],
R_sh_ref=cec_module_params['R_sh_ref'],
R_s=cec_module_params['R_s'],
EgRef=1.121,
dEgdT=-0.0002677)
assert_series_equal(IL, pd.Series([0.0, 6.036, 6.096], index=times),
check_less_precise=3)
assert_series_equal(I0, pd.Series([0.0, 1.94e-9, 7.419e-8], index=times),
check_less_precise=3)
assert_allclose(Rs, 0.094)
assert_series_equal(Rsh, pd.Series([np.inf, 19.65, 19.65], index=times),
check_less_precise=3)
assert_series_equal(nNsVth, pd.Series([0.473, 0.473, 0.5127], index=times),
check_less_precise=3)
def test_calcparams_cec(cec_module_params):
times = pd.date_range(start='2015-01-01', periods=3, freq='12H')
effective_irradiance = pd.Series([0.0, 800.0, 800.0], index=times)
temp_cell = pd.Series([25, 25, 50], index=times)
IL, I0, Rs, Rsh, nNsVth = pvsystem.calcparams_cec(
effective_irradiance,
temp_cell,
alpha_sc=cec_module_params['alpha_sc'],
a_ref=cec_module_params['a_ref'],
I_L_ref=cec_module_params['I_L_ref'],
I_o_ref=cec_module_params['I_o_ref'],
R_sh_ref=cec_module_params['R_sh_ref'],
R_s=cec_module_params['R_s'],
Adjust=cec_module_params['Adjust'],
EgRef=1.121,
dEgdT=-0.0002677)
assert_series_equal(IL, pd.Series([0.0, 6.036, 6.0896], index=times),
check_less_precise=3)
assert_series_equal(I0, pd.Series([0.0, 1.94e-9, 7.419e-8], index=times),
check_less_precise=3)
assert_allclose(Rs, 0.094)
assert_series_equal(Rsh, pd.Series([np.inf, 19.65, 19.65], index=times),
check_less_precise=3)
assert_series_equal(nNsVth, pd.Series([0.473, 0.473, 0.5127], index=times),
check_less_precise=3)
def test_calcparams_pvsyst(pvsyst_module_params):
times = pd.date_range(start='2015-01-01', periods=2, freq='12H')
effective_irradiance = pd.Series([0.0, 800.0], index=times)
temp_cell = pd.Series([25, 50], index=times)
IL, I0, Rs, Rsh, nNsVth = pvsystem.calcparams_pvsyst(
effective_irradiance,
temp_cell,
alpha_sc=pvsyst_module_params['alpha_sc'],
gamma_ref=pvsyst_module_params['gamma_ref'],
mu_gamma=pvsyst_module_params['mu_gamma'],
I_L_ref=pvsyst_module_params['I_L_ref'],
I_o_ref=pvsyst_module_params['I_o_ref'],
R_sh_ref=pvsyst_module_params['R_sh_ref'],
R_sh_0=pvsyst_module_params['R_sh_0'],
R_s=pvsyst_module_params['R_s'],
cells_in_series=pvsyst_module_params['cells_in_series'],
EgRef=pvsyst_module_params['EgRef'])
assert_series_equal(
IL.round(decimals=3), pd.Series([0.0, 4.8200], index=times))
assert_series_equal(
I0.round(decimals=3), pd.Series([0.0, 1.47e-7], index=times))
assert_allclose(Rs, 0.500)
assert_series_equal(
Rsh.round(decimals=3), pd.Series([1000.0, 305.757], index=times))
assert_series_equal(
nNsVth.round(decimals=4), | pd.Series([1.6186, 1.7961], index=times) | pandas.Series |
from __future__ import division
from datetime import timedelta
from functools import partial
import itertools
from nose.tools import assert_true
from parameterized import parameterized
import numpy as np
from numpy.testing import assert_array_equal, assert_almost_equal
import pandas as pd
from toolz import merge
from zipline.pipeline import SimplePipelineEngine, Pipeline, CustomFactor
from zipline.pipeline.common import (
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME,
)
from zipline.pipeline.data import DataSet
from zipline.pipeline.data import Column
from zipline.pipeline.domain import EquitySessionDomain
import platform
if platform.system() != 'Windows':
from zipline.pipeline.loaders.blaze.estimates import (
BlazeNextEstimatesLoader,
BlazeNextSplitAdjustedEstimatesLoader,
BlazePreviousEstimatesLoader,
BlazePreviousSplitAdjustedEstimatesLoader,
)
from zipline.pipeline.loaders.earnings_estimates import (
INVALID_NUM_QTRS_MESSAGE,
NextEarningsEstimatesLoader,
NextSplitAdjustedEarningsEstimatesLoader,
normalize_quarters,
PreviousEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
split_normalized_quarters,
)
from zipline.testing.fixtures import (
WithAdjustmentReader,
WithTradingSessions,
ZiplineTestCase,
)
from zipline.testing.predicates import assert_equal, assert_raises_regex
from zipline.testing.predicates import assert_frame_equal
from zipline.utils.numpy_utils import datetime64ns_dtype
from zipline.utils.numpy_utils import float64_dtype
import unittest
class Estimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate = Column(dtype=float64_dtype)
class MultipleColumnsEstimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate1 = Column(dtype=float64_dtype)
estimate2 = Column(dtype=float64_dtype)
def QuartersEstimates(announcements_out):
class QtrEstimates(Estimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def MultipleColumnsQuartersEstimates(announcements_out):
class QtrEstimates(MultipleColumnsEstimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def QuartersEstimatesNoNumQuartersAttr(num_qtr):
class QtrEstimates(Estimates):
name = Estimates
return QtrEstimates
def create_expected_df_for_factor_compute(start_date,
sids,
tuples,
end_date):
"""
Given a list of tuples of new data we get for each sid on each critical
date (when information changes), create a DataFrame that fills that
data through a date range ending at `end_date`.
"""
df = pd.DataFrame(tuples,
columns=[SID_FIELD_NAME,
'estimate',
'knowledge_date'])
df = df.pivot_table(columns=SID_FIELD_NAME,
values='estimate',
index='knowledge_date')
df = df.reindex(
pd.date_range(start_date, end_date)
)
# Index name is lost during reindex.
df.index = df.index.rename('knowledge_date')
df['at_date'] = end_date.tz_localize('utc')
df = df.set_index(['at_date', df.index.tz_localize('utc')]).ffill()
new_sids = set(sids) - set(df.columns)
df = df.reindex(columns=df.columns.union(new_sids))
return df
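# Illustrative sketch (not part of the original test suite): for a single sid
# whose estimate of 100. becomes known on 2015-01-06,
#
#   create_expected_df_for_factor_compute(
#       start_date=pd.Timestamp('2015-01-05'),
#       sids=[0],
#       tuples=[(0, 100., pd.Timestamp('2015-01-06'))],
#       end_date=pd.Timestamp('2015-01-08'),
#   )
#
# returns a frame indexed by (at_date, knowledge_date) that is NaN on
# 2015-01-05 and forward-fills 100. from 2015-01-06 onward.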
class WithEstimates(WithTradingSessions, WithAdjustmentReader):
"""
ZiplineTestCase mixin providing cls.loader and cls.events as class
level fixtures.
Methods
-------
make_loader(events, columns) -> PipelineLoader
Method which returns the loader to be used throughout tests.
events : pd.DataFrame
The raw events to be used as input to the pipeline loader.
columns : dict[str -> str]
The dictionary mapping the names of BoundColumns to the
associated column name in the events DataFrame.
make_columns() -> dict[BoundColumn -> str]
Method which returns a dictionary of BoundColumns mapped to the
associated column names in the raw data.
"""
# Short window defined in order for test to run faster.
START_DATE = pd.Timestamp('2014-12-28')
END_DATE = pd.Timestamp('2015-02-04')
@classmethod
def make_loader(cls, events, columns):
raise NotImplementedError('make_loader')
@classmethod
def make_events(cls):
raise NotImplementedError('make_events')
@classmethod
def get_sids(cls):
return cls.events[SID_FIELD_NAME].unique()
@classmethod
def make_columns(cls):
return {
Estimates.event_date: 'event_date',
Estimates.fiscal_quarter: 'fiscal_quarter',
Estimates.fiscal_year: 'fiscal_year',
Estimates.estimate: 'estimate'
}
def make_engine(self, loader=None):
if loader is None:
loader = self.loader
return SimplePipelineEngine(
lambda x: loader,
self.asset_finder,
default_domain=EquitySessionDomain(
self.trading_days, self.ASSET_FINDER_COUNTRY_CODE,
),
)
@classmethod
def init_class_fixtures(cls):
cls.events = cls.make_events()
cls.ASSET_FINDER_EQUITY_SIDS = cls.get_sids()
cls.ASSET_FINDER_EQUITY_SYMBOLS = [
's' + str(n) for n in cls.ASSET_FINDER_EQUITY_SIDS
]
# We need to instantiate certain constants needed by supers of
# `WithEstimates` before we call their `init_class_fixtures`.
super(WithEstimates, cls).init_class_fixtures()
cls.columns = cls.make_columns()
# Some tests require `WithAdjustmentReader` to be set up by the time we
# make the loader.
cls.loader = cls.make_loader(cls.events, {column.name: val for
column, val in
cls.columns.items()})
class WithOneDayPipeline(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
    test_load_one_day()
        Tests that the loader returns the expected output for a single day
        with multiple estimate columns.
"""
@classmethod
def make_columns(cls):
return {
MultipleColumnsEstimates.event_date: 'event_date',
MultipleColumnsEstimates.fiscal_quarter: 'fiscal_quarter',
MultipleColumnsEstimates.fiscal_year: 'fiscal_year',
MultipleColumnsEstimates.estimate1: 'estimate1',
MultipleColumnsEstimates.estimate2: 'estimate2'
}
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-06')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-10'),
pd.Timestamp('2015-01-20')],
'estimate1': [1., 2.],
'estimate2': [3., 4.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015]
})
@classmethod
def make_expected_out(cls):
raise NotImplementedError('make_expected_out')
@classmethod
def init_class_fixtures(cls):
super(WithOneDayPipeline, cls).init_class_fixtures()
cls.sid0 = cls.asset_finder.retrieve_asset(0)
cls.expected_out = cls.make_expected_out()
def test_load_one_day(self):
# We want to test multiple columns
dataset = MultipleColumnsQuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=pd.Timestamp('2015-01-15', tz='utc'),
end_date=pd.Timestamp('2015-01-15', tz='utc'),
)
assert_frame_equal(results, self.expected_out)
class PreviousWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
    Tests that the previous-quarter loader returns the expected output for a
    single day with multiple estimate columns.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp('2015-01-10'),
'estimate1': 1.,
'estimate2': 3.,
FISCAL_QUARTER_FIELD_NAME: 1.,
FISCAL_YEAR_FIELD_NAME: 2015.,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp('2015-01-15', tz='utc'), cls.sid0),)
)
)
class NextWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
    Tests that the next-quarter loader returns the expected output for a
    single day with multiple estimate columns.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp('2015-01-20'),
'estimate1': 2.,
'estimate2': 4.,
FISCAL_QUARTER_FIELD_NAME: 2.,
FISCAL_YEAR_FIELD_NAME: 2015.,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp('2015-01-15', tz='utc'), cls.sid0),)
)
)
dummy_df = pd.DataFrame({SID_FIELD_NAME: 0},
columns=[SID_FIELD_NAME,
TS_FIELD_NAME,
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
'estimate'],
index=[0])
class WithWrongLoaderDefinition(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_events(cls):
return dummy_df
def test_wrong_num_announcements_passed(self):
bad_dataset1 = QuartersEstimates(-1)
bad_dataset2 = QuartersEstimates(-2)
good_dataset = QuartersEstimates(1)
engine = self.make_engine()
columns = {c.name + str(dataset.num_announcements): c.latest
for dataset in (bad_dataset1,
bad_dataset2,
good_dataset)
for c in dataset.columns}
p = Pipeline(columns)
with self.assertRaises(ValueError) as e:
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
assert_raises_regex(e, INVALID_NUM_QTRS_MESSAGE % "-1,-2")
def test_no_num_announcements_attr(self):
dataset = QuartersEstimatesNoNumQuartersAttr(1)
engine = self.make_engine()
p = Pipeline({c.name: c.latest for c in dataset.columns})
with self.assertRaises(AttributeError):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
class PreviousWithWrongNumQuarters(WithWrongLoaderDefinition,
ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextWithWrongNumQuarters(WithWrongLoaderDefinition,
ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
options = ["split_adjustments_loader",
"split_adjusted_column_names",
"split_adjusted_asof"]
class WrongSplitsLoaderDefinition(WithEstimates, ZiplineTestCase):
"""
Test class that tests that loaders break correctly when incorrectly
instantiated.
Tests
-----
test_extra_splits_columns_passed(SplitAdjustedEstimatesLoader)
A test that checks that the loader correctly breaks when an
unexpected column is passed in the list of split-adjusted columns.
"""
@classmethod
def init_class_fixtures(cls):
super(WithEstimates, cls).init_class_fixtures()
@parameterized.expand(itertools.product(
(NextSplitAdjustedEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader),
))
def test_extra_splits_columns_passed(self, loader):
columns = {
Estimates.event_date: 'event_date',
Estimates.fiscal_quarter: 'fiscal_quarter',
Estimates.fiscal_year: 'fiscal_year',
Estimates.estimate: 'estimate'
}
with self.assertRaises(ValueError):
loader(dummy_df,
{column.name: val for column, val in
columns.items()},
split_adjustments_loader=self.adjustment_reader,
split_adjusted_column_names=["estimate", "extra_col"],
split_adjusted_asof=pd.Timestamp("2015-01-01"))
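        # For contrast, a presumably valid construction would restrict
        # ``split_adjusted_column_names`` to columns that actually exist in
        # the dataset (e.g. ``["estimate"]``, as the split-adjusted loaders
        # below do); only the unknown "extra_col" triggers the ValueError
        # asserted here.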
class WithEstimatesTimeZero(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
cls.events : pd.DataFrame
Generated dynamically in order to test inter-leavings of estimates and
event dates for multiple quarters to make sure that we select the
right immediate 'next' or 'previous' quarter relative to each date -
i.e., the right 'time zero' on the timeline. We care about selecting
the right 'time zero' because we use that to calculate which quarter's
data needs to be returned for each day.
Methods
-------
get_expected_estimate(q1_knowledge,
q2_knowledge,
comparable_date) -> pd.DataFrame
Retrieves the expected estimate given the latest knowledge about each
quarter and the date on which the estimate is being requested. If
there is no expected estimate, returns an empty DataFrame.
Tests
------
test_estimates()
Tests that we get the right 'time zero' value on each day for each
sid and for each column.
"""
# Shorter date range for performance
END_DATE = pd.Timestamp('2015-01-28')
q1_knowledge_dates = [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-04'),
pd.Timestamp('2015-01-07'),
pd.Timestamp('2015-01-11')]
q2_knowledge_dates = [pd.Timestamp('2015-01-14'),
pd.Timestamp('2015-01-17'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-23')]
# We want to model the possibility of an estimate predicting a release date
# that doesn't match the actual release. This could be done by dynamically
# generating more combinations with different release dates, but that
# significantly increases the amount of time it takes to run the tests.
# These hard-coded cases are sufficient to know that we can update our
# beliefs when we get new information.
q1_release_dates = [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-14')] # One day late
q2_release_dates = [pd.Timestamp('2015-01-25'), # One day early
pd.Timestamp('2015-01-26')]
@classmethod
def make_events(cls):
"""
In order to determine which estimate we care about for a particular
sid, we need to look at all estimates that we have for that sid and
their associated event dates.
We define q1 < q2, and thus event1 < event2 since event1 occurs
during q1 and event2 occurs during q2 and we assume that there can
only be 1 event per quarter. We assume that there can be multiple
estimates per quarter leading up to the event. We assume that estimates
will not surpass the relevant event date. We will look at 2 estimates
for an event before the event occurs, since that is the simplest
scenario that covers the interesting edge cases:
- estimate values changing
- a release date changing
- estimates for different quarters interleaving
Thus, we generate all possible inter-leavings of 2 estimates per
quarter-event where estimate1 < estimate2 and all estimates are < the
relevant event and assign each of these inter-leavings to a
different sid.
"""
sid_estimates = []
sid_releases = []
# We want all permutations of 2 knowledge dates per quarter.
it = enumerate(
itertools.permutations(cls.q1_knowledge_dates +
cls.q2_knowledge_dates,
4)
)
for sid, (q1e1, q1e2, q2e1, q2e2) in it:
# We're assuming that estimates must come before the relevant
# release.
if (q1e1 < q1e2 and
q2e1 < q2e2 and
# All estimates are < Q2's event, so just constrain Q1
# estimates.
q1e1 < cls.q1_release_dates[0] and
q1e2 < cls.q1_release_dates[0]):
sid_estimates.append(cls.create_estimates_df(q1e1,
q1e2,
q2e1,
q2e2,
sid))
sid_releases.append(cls.create_releases_df(sid))
return pd.concat(sid_estimates +
sid_releases).reset_index(drop=True)
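    # Concrete example of one accepted interleaving (all dates in Jan 2015):
    # q1e1=01-01, q1e2=01-04, q2e1=01-07, q2e2=01-11 satisfies
    # q1e1 < q1e2 < the Q1 release on 01-13 and q2e1 < q2e2, so it is assigned
    # its own sid; permutations violating any of those constraints are
    # skipped.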
@classmethod
def get_sids(cls):
sids = cls.events[SID_FIELD_NAME].unique()
# Tack on an extra sid to make sure that sids with no data are
# included but have all-null columns.
return list(sids) + [max(sids) + 1]
@classmethod
def create_releases_df(cls, sid):
# Final release dates never change. The quarters have very tight date
# ranges in order to reduce the number of dates we need to iterate
# through when testing.
return pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-26')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-26')],
'estimate': [0.5, 0.8],
FISCAL_QUARTER_FIELD_NAME: [1.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0],
SID_FIELD_NAME: sid
})
@classmethod
def create_estimates_df(cls,
q1e1,
q1e2,
q2e1,
q2e2,
sid):
return pd.DataFrame({
EVENT_DATE_FIELD_NAME: cls.q1_release_dates + cls.q2_release_dates,
'estimate': [.1, .2, .3, .4],
FISCAL_QUARTER_FIELD_NAME: [1.0, 1.0, 2.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0, 2015.0, 2015.0],
TS_FIELD_NAME: [q1e1, q1e2, q2e1, q2e2],
SID_FIELD_NAME: sid,
})
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
return pd.DataFrame()
def test_estimates(self):
dataset = QuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=self.trading_days[1],
end_date=self.trading_days[-2],
)
for sid in self.ASSET_FINDER_EQUITY_SIDS:
sid_estimates = results.xs(sid, level=1)
# Separate assertion for all-null DataFrame to avoid setting
# column dtypes on `all_expected`.
if sid == max(self.ASSET_FINDER_EQUITY_SIDS):
assert_true(sid_estimates.isnull().all().all())
else:
ts_sorted_estimates = self.events[
self.events[SID_FIELD_NAME] == sid
].sort_values(TS_FIELD_NAME)
q1_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 1
]
q2_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 2
]
all_expected = pd.concat(
[self.get_expected_estimate(
q1_knowledge[q1_knowledge[TS_FIELD_NAME] <=
date.tz_localize(None)],
q2_knowledge[q2_knowledge[TS_FIELD_NAME] <=
date.tz_localize(None)],
date.tz_localize(None),
).set_index([[date]]) for date in sid_estimates.index],
axis=0)
assert_equal(all_expected[sid_estimates.columns],
sid_estimates)
class NextEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
# If our latest knowledge of q1 is that the release is
# happening on this simulation date or later, then that's
# the estimate we want to use.
if (not q1_knowledge.empty and
q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >=
comparable_date):
return q1_knowledge.iloc[-1:]
# If q1 has already happened or we don't know about it
# yet and our latest knowledge indicates that q2 hasn't
# happened yet, then that's the estimate we want to use.
elif (not q2_knowledge.empty and
q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >=
comparable_date):
return q2_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns,
index=[comparable_date])
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextEstimateLoaderTestCase(NextEstimate):
"""
Run the same tests as EventsLoaderTestCase, but using a BlazeEventsLoader.
"""
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(
bz.data(events),
columns,
)
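    # NB: ``blaze`` is imported inside ``make_loader`` here and in the other
    # Blaze test cases below, presumably so this module can still be imported
    # (and the non-Blaze tests run) when blaze is not installed.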
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class PreviousEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
# The expected estimate will be for q2 if the last thing
# we've seen is that the release date already happened.
# Otherwise, it'll be for q1, as long as the release date
# for q1 has already happened.
if (not q2_knowledge.empty and
q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <=
comparable_date):
return q2_knowledge.iloc[-1:]
elif (not q1_knowledge.empty and
q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <=
comparable_date):
return q1_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns,
index=[comparable_date])
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousEstimateLoaderTestCase(PreviousEstimate):
"""
Run the same tests as EventsLoaderTestCase, but using a BlazeEventsLoader.
"""
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(
bz.data(events),
columns,
)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class WithEstimateMultipleQuarters(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events, cls.make_expected_out as
class-level fixtures and self.test_multiple_qtrs_requested as a test.
Attributes
----------
events : pd.DataFrame
Simple DataFrame with estimates for 2 quarters for a single sid.
Methods
-------
make_expected_out() --> pd.DataFrame
Returns the DataFrame that is expected as a result of running a
Pipeline where estimates are requested for multiple quarters out.
fill_expected_out(expected)
Fills the expected DataFrame with data.
Tests
------
test_multiple_qtrs_requested()
        Runs a Pipeline that requests estimates for multiple quarters out and
        checks that the returned columns contain data for the correct number
        of quarters out.
"""
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-06')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-10'),
pd.Timestamp('2015-01-20')],
'estimate': [1., 2.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015]
})
@classmethod
def init_class_fixtures(cls):
super(WithEstimateMultipleQuarters, cls).init_class_fixtures()
cls.expected_out = cls.make_expected_out()
@classmethod
def make_expected_out(cls):
expected = pd.DataFrame(columns=[cls.columns[col] + '1'
for col in cls.columns] +
[cls.columns[col] + '2'
for col in cls.columns],
index=cls.trading_days)
for (col, raw_name), suffix in itertools.product(
cls.columns.items(), ('1', '2')
):
expected_name = raw_name + suffix
if col.dtype == datetime64ns_dtype:
expected[expected_name] = pd.to_datetime(
expected[expected_name]
)
else:
expected[expected_name] = expected[
expected_name
].astype(col.dtype)
cls.fill_expected_out(expected)
return expected.reindex(cls.trading_days)
def test_multiple_qtrs_requested(self):
dataset1 = QuartersEstimates(1)
dataset2 = QuartersEstimates(2)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline(
merge([{c.name + '1': c.latest for c in dataset1.columns},
{c.name + '2': c.latest for c in dataset2.columns}])
),
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
q1_columns = [col.name + '1' for col in self.columns]
q2_columns = [col.name + '2' for col in self.columns]
# We now expect a column for 1 quarter out and a column for 2
# quarters out for each of the dataset columns.
assert_equal(sorted(np.array(q1_columns + q2_columns)),
sorted(results.columns.values))
assert_equal(self.expected_out.sort_index(axis=1),
results.xs(0, level=1).sort_index(axis=1))
class NextEstimateMultipleQuarters(
WithEstimateMultipleQuarters, ZiplineTestCase
):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected.loc[
pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-11'),
raw_name + '1'
] = cls.events[raw_name].iloc[0]
expected.loc[
pd.Timestamp('2015-01-11'):pd.Timestamp('2015-01-20'),
raw_name + '1'
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
# We only have an estimate and event date for 2 quarters out before
# Q1's event happens; after Q1's event, we know 1 Q out but not 2 Qs
# out.
for col_name in ['estimate', 'event_date']:
expected.loc[
pd.Timestamp('2015-01-06'):pd.Timestamp('2015-01-10'),
col_name + '2'
] = cls.events[col_name].iloc[1]
# But we know what FQ and FY we'd need in both Q1 and Q2
# because we know which FQ is next and can calculate from there
expected.loc[
pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-09'),
FISCAL_QUARTER_FIELD_NAME + '2'
] = 2
expected.loc[
pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20'),
FISCAL_QUARTER_FIELD_NAME + '2'
] = 3
expected.loc[
pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-20'),
FISCAL_YEAR_FIELD_NAME + '2'
] = 2015
return expected
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextEstimateMultipleQuarters(NextEstimateMultipleQuarters):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(
bz.data(events),
columns,
)
class PreviousEstimateMultipleQuarters(
WithEstimateMultipleQuarters,
ZiplineTestCase
):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected[raw_name + '1'].loc[
pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-19')
] = cls.events[raw_name].iloc[0]
expected[raw_name + '1'].loc[
pd.Timestamp('2015-01-20'):
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
for col_name in ['estimate', 'event_date']:
expected[col_name + '2'].loc[
pd.Timestamp('2015-01-20'):
] = cls.events[col_name].iloc[0]
expected[
FISCAL_QUARTER_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20')] = 4
expected[
FISCAL_YEAR_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20')] = 2014
expected[
FISCAL_QUARTER_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-20'):] = 1
expected[
FISCAL_YEAR_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-20'):] = 2015
return expected
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousEstimateMultipleQuarters(PreviousEstimateMultipleQuarters):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(
bz.data(events),
columns,
)
class WithVaryingNumEstimates(WithEstimates):
"""
ZiplineTestCase mixin providing fixtures and a test to ensure that we
have the correct overwrites when the event date changes. We want to make
sure that if we have a quarter with an event date that gets pushed back,
we don't start overwriting for the next quarter early. Likewise,
if we have a quarter with an event date that gets pushed forward, we want
to make sure that we start applying adjustments at the appropriate, earlier
date, rather than the later date.
Methods
-------
assert_compute()
Defines how to determine that results computed for the `SomeFactor`
factor are correct.
Tests
-----
test_windows_with_varying_num_estimates()
Tests that we create the correct overwrites from 2015-01-13 to
2015-01-14 regardless of how event dates were updated for each
quarter for each sid.
"""
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 3 + [1] * 3,
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-13')] * 2,
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-20')],
'estimate': [11., 12., 21.] * 2,
FISCAL_QUARTER_FIELD_NAME: [1, 1, 2] * 2,
FISCAL_YEAR_FIELD_NAME: [2015] * 6
})
@classmethod
def assert_compute(cls, estimate, today):
raise NotImplementedError('assert_compute')
def test_windows_with_varying_num_estimates(self):
dataset = QuartersEstimates(1)
assert_compute = self.assert_compute
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = 3
def compute(self, today, assets, out, estimate):
assert_compute(estimate, today)
engine = self.make_engine()
engine.run_pipeline(
Pipeline({'est': SomeFactor()}),
start_date=pd.Timestamp('2015-01-13', tz='utc'),
# last event date we have
end_date=pd.Timestamp('2015-01-14', tz='utc'),
)
class PreviousVaryingNumEstimates(
WithVaryingNumEstimates,
ZiplineTestCase
):
def assert_compute(self, estimate, today):
if today == pd.Timestamp('2015-01-13', tz='utc'):
assert_array_equal(estimate[:, 0],
np.array([np.NaN, np.NaN, 12]))
assert_array_equal(estimate[:, 1],
np.array([np.NaN, 12, 12]))
else:
assert_array_equal(estimate[:, 0],
np.array([np.NaN, 12, 12]))
assert_array_equal(estimate[:, 1],
np.array([12, 12, 12]))
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousVaryingNumEstimates(PreviousVaryingNumEstimates):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(
bz.data(events),
columns,
)
class NextVaryingNumEstimates(
WithVaryingNumEstimates,
ZiplineTestCase
):
def assert_compute(self, estimate, today):
if today == pd.Timestamp('2015-01-13', tz='utc'):
assert_array_equal(estimate[:, 0],
np.array([11, 12, 12]))
assert_array_equal(estimate[:, 1],
np.array([np.NaN, np.NaN, 21]))
else:
assert_array_equal(estimate[:, 0],
np.array([np.NaN, 21, 21]))
assert_array_equal(estimate[:, 1],
np.array([np.NaN, 21, 21]))
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextVaryingNumEstimates(NextVaryingNumEstimates):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(
bz.data(events),
columns,
)
class WithEstimateWindows(WithEstimates):
"""
    ZiplineTestCase mixin providing fixtures and a test for running a
Pipeline with an estimates loader over differently-sized windows.
Attributes
----------
events : pd.DataFrame
DataFrame with estimates for 2 quarters for 2 sids.
window_test_start_date : pd.Timestamp
The date from which the window should start.
timelines : dict[int -> pd.DataFrame]
        A dictionary mapping the number of quarters out to snapshots of how
        the data should look on each date in the date range.
Methods
-------
make_expected_timelines() -> dict[int -> pd.DataFrame]
Creates a dictionary of expected data. See `timelines`, above.
Tests
-----
test_estimate_windows_at_quarter_boundaries()
Tests that we overwrite values with the correct quarter's estimate at
the correct dates when we have a factor that asks for a window of data.
"""
END_DATE = pd.Timestamp('2015-02-10')
window_test_start_date = pd.Timestamp('2015-01-05')
critical_dates = [pd.Timestamp('2015-01-09', tz='utc'),
pd.Timestamp('2015-01-15', tz='utc'),
pd.Timestamp('2015-01-20', tz='utc'),
pd.Timestamp('2015-01-26', tz='utc'),
pd.Timestamp('2015-02-05', tz='utc'),
pd.Timestamp('2015-02-10', tz='utc')]
# Starting date, number of announcements out.
window_test_cases = list(itertools.product(critical_dates, (1, 2)))
@classmethod
def make_events(cls):
# Typical case: 2 consecutive quarters.
sid_0_timeline = pd.DataFrame({
TS_FIELD_NAME: [cls.window_test_start_date,
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-02-10'),
# We want a case where we get info for a later
# quarter before the current quarter is over but
# after the split_asof_date to make sure that
# we choose the correct date to overwrite until.
pd.Timestamp('2015-01-18')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-02-10'),
pd.Timestamp('2015-02-10'),
pd.Timestamp('2015-04-01')],
'estimate': [100., 101.] + [200., 201.] + [400],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2 + [4],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 0,
})
# We want a case where we skip a quarter. We never find out about Q2.
sid_10_timeline = pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-15')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-22'), pd.Timestamp('2015-01-22'),
pd.Timestamp('2015-02-05'), pd.Timestamp('2015-02-05')],
'estimate': [110., 111.] + [310., 311.],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [3] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 10
})
# We want to make sure we have correct overwrites when sid quarter
# boundaries collide. This sid's quarter boundaries collide with sid 0.
sid_20_timeline = pd.DataFrame({
TS_FIELD_NAME: [cls.window_test_start_date,
pd.Timestamp('2015-01-07'),
cls.window_test_start_date,
pd.Timestamp('2015-01-17')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-02-10'),
pd.Timestamp('2015-02-10')],
'estimate': [120., 121.] + [220., 221.],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 20
})
concatted = pd.concat([sid_0_timeline,
sid_10_timeline,
sid_20_timeline]).reset_index()
np.random.seed(0)
return concatted.reindex(np.random.permutation(concatted.index))
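    # The concatenated rows are deliberately shuffled with a fixed seed,
    # presumably to verify that the loaders do not depend on the events being
    # pre-sorted by sid or timestamp while keeping the test deterministic.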
@classmethod
def get_sids(cls):
sids = sorted(cls.events[SID_FIELD_NAME].unique())
# Add extra sids between sids in our data. We want to test that we
# apply adjustments to the correct sids.
return [sid for i in range(len(sids) - 1)
for sid in range(sids[i], sids[i+1])] + [sids[-1]]
@classmethod
def make_expected_timelines(cls):
return {}
@classmethod
def init_class_fixtures(cls):
super(WithEstimateWindows, cls).init_class_fixtures()
cls.create_expected_df_for_factor_compute = partial(
create_expected_df_for_factor_compute,
cls.window_test_start_date,
cls.get_sids()
)
cls.timelines = cls.make_expected_timelines()
@parameterized.expand(window_test_cases)
def test_estimate_windows_at_quarter_boundaries(self,
start_date,
num_announcements_out):
dataset = QuartersEstimates(num_announcements_out)
trading_days = self.trading_days
timelines = self.timelines
# The window length should be from the starting index back to the first
# date on which we got data. The goal is to ensure that as we
# progress through the timeline, all data we got, starting from that
# first date, is correctly overwritten.
window_len = (
self.trading_days.get_loc(start_date) -
self.trading_days.get_loc(self.window_test_start_date) + 1
)
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = window_len
def compute(self, today, assets, out, estimate):
today_idx = trading_days.get_loc(today)
today_timeline = timelines[
num_announcements_out
].loc[today].reindex(
trading_days[:today_idx + 1]
).values
timeline_start_idx = (len(today_timeline) - window_len)
assert_almost_equal(estimate,
today_timeline[timeline_start_idx:])
engine = self.make_engine()
engine.run_pipeline(
Pipeline({'est': SomeFactor()}),
start_date=start_date,
# last event date we have
end_date=pd.Timestamp('2015-02-10', tz='utc'),
)
class PreviousEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat([
pd.concat([
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date)
], end_date)
for end_date in pd.date_range('2015-01-09', '2015-01-19')
]),
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-20'))],
pd.Timestamp('2015-01-20')
),
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-20'))],
pd.Timestamp('2015-01-21')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, 111, pd.Timestamp('2015-01-22')),
(20, 121, pd.Timestamp('2015-01-20'))],
end_date
) for end_date in pd.date_range('2015-01-22', '2015-02-04')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, 311, pd.Timestamp('2015-02-05')),
(20, 121, pd.Timestamp('2015-01-20'))],
end_date
) for end_date in pd.date_range('2015-02-05', '2015-02-09')
]),
cls.create_expected_df_for_factor_compute(
[(0, 201, pd.Timestamp('2015-02-10')),
(10, 311, pd.Timestamp('2015-02-05')),
(20, 221, pd.Timestamp('2015-02-10'))],
pd.Timestamp('2015-02-10')
),
])
twoq_previous = pd.concat(
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-09', '2015-02-09')] +
            # We never get estimates for sid 10 for 2Q ago because once Q3
# becomes our previous quarter, 2Q ago would be Q2, and we have
# no data on it.
[cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-02-10')),
(10, np.NaN, pd.Timestamp('2015-02-05')),
(20, 121, pd.Timestamp('2015-02-10'))],
pd.Timestamp('2015-02-10')
)]
)
return {
1: oneq_previous,
2: twoq_previous
}
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousEstimateWindows(PreviousEstimateWindows):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(bz.data(events), columns)
class NextEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_next = pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07'))],
pd.Timestamp('2015-01-09')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07'))],
end_date
) for end_date in pd.date_range('2015-01-12', '2015-01-19')
]),
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(0, 101, pd.Timestamp('2015-01-20')),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07'))],
pd.Timestamp('2015-01-20')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
end_date
) for end_date in pd.date_range('2015-01-21', '2015-01-22')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, 310, pd.Timestamp('2015-01-09')),
(10, 311, pd.Timestamp('2015-01-15')),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
end_date
) for end_date in pd.date_range('2015-01-23', '2015-02-05')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
end_date
) for end_date in pd.date_range('2015-02-06', '2015-02-09')
]),
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(0, 201, pd.Timestamp('2015-02-10')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
pd.Timestamp('2015-02-10')
)
])
twoq_next = pd.concat(
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-09', '2015-01-11')] +
[cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-12', '2015-01-16')] +
[cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
pd.Timestamp('2015-01-20')
)] +
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-21', '2015-02-10')]
)
return {
1: oneq_next,
2: twoq_next
}
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextEstimateWindows(NextEstimateWindows):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(bz.data(events), columns)
class WithSplitAdjustedWindows(WithEstimateWindows):
"""
    ZiplineTestCase mixin providing fixtures and a test for running a
Pipeline with an estimates loader over differently-sized windows and with
split adjustments.
"""
split_adjusted_asof_date = pd.Timestamp('2015-01-14')
@classmethod
def make_events(cls):
# Add an extra sid that has a release before the split-asof-date in
# order to test that we're reversing splits correctly in the previous
# case (without an overwrite) and in the next case (with an overwrite).
sid_30 = pd.DataFrame({
TS_FIELD_NAME: [cls.window_test_start_date,
pd.Timestamp('2015-01-09'),
# For Q2, we want it to start early enough
# that we can have several adjustments before
# the end of the first quarter so that we
# can test un-adjusting & readjusting with an
# overwrite.
cls.window_test_start_date,
# We want the Q2 event date to be enough past
# the split-asof-date that we can have
# several splits and can make sure that they
# are applied correctly.
pd.Timestamp('2015-01-20')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-20')],
'estimate': [130., 131., 230., 231.],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 30
})
# An extra sid to test no splits before the split-adjusted-asof-date.
# We want an event before and after the split-adjusted-asof-date &
# timestamps for data points also before and after
        # split-adjusted-asof-date (but also before the split dates, so that
# we can test that splits actually get applied at the correct times).
sid_40 = pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-15')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-02-10')],
'estimate': [140., 240.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 40
})
# An extra sid to test all splits before the
# split-adjusted-asof-date. All timestamps should be before that date
# so that we have cases where we un-apply and re-apply splits.
sid_50 = pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-12')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-02-10')],
'estimate': [150., 250.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 50
})
return pd.concat([
# Slightly hacky, but want to make sure we're using the same
# events as WithEstimateWindows.
cls.__base__.make_events(),
sid_30,
sid_40,
sid_50,
])
@classmethod
def make_splits_data(cls):
# For sid 0, we want to apply a series of splits before and after the
        # split-adjusted-asof-date as well as between quarters (for the
# previous case, where we won't see any values until after the event
# happens).
sid_0_splits = pd.DataFrame({
SID_FIELD_NAME: 0,
'ratio': (-1., 2., 3., 4., 5., 6., 7., 100),
'effective_date': (pd.Timestamp('2014-01-01'), # Filter out
# Split before Q1 event & after first estimate
pd.Timestamp('2015-01-07'),
# Split before Q1 event
pd.Timestamp('2015-01-09'),
# Split before Q1 event
pd.Timestamp('2015-01-13'),
# Split before Q1 event
pd.Timestamp('2015-01-15'),
# Split before Q1 event
pd.Timestamp('2015-01-18'),
# Split after Q1 event and before Q2 event
pd.Timestamp('2015-01-30'),
# Filter out - this is after our date index
pd.Timestamp('2016-01-01'))
})
sid_10_splits = pd.DataFrame({
SID_FIELD_NAME: 10,
'ratio': (.2, .3),
'effective_date': (
# We want a split before the first estimate and before the
# split-adjusted-asof-date but within our calendar index so
# that we can test that the split is NEVER applied.
pd.Timestamp('2015-01-07'),
# Apply a single split before Q1 event.
pd.Timestamp('2015-01-20')),
})
# We want a sid with split dates that collide with another sid (0) to
# make sure splits are correctly applied for both sids.
sid_20_splits = pd.DataFrame({
SID_FIELD_NAME: 20,
'ratio': (.4, .5, .6, .7, .8, .9,),
'effective_date': (
pd.Timestamp('2015-01-07'),
pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-15'),
pd.Timestamp('2015-01-18'),
pd.Timestamp('2015-01-30')),
})
# This sid has event dates that are shifted back so that we can test
# cases where an event occurs before the split-asof-date.
sid_30_splits = pd.DataFrame({
SID_FIELD_NAME: 30,
'ratio': (8, 9, 10, 11, 12),
'effective_date': (
# Split before the event and before the
# split-asof-date.
pd.Timestamp('2015-01-07'),
# Split on date of event but before the
# split-asof-date.
pd.Timestamp('2015-01-09'),
# Split after the event, but before the
# split-asof-date.
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-15'),
pd.Timestamp('2015-01-18')),
})
# No splits for a sid before the split-adjusted-asof-date.
sid_40_splits = pd.DataFrame({
SID_FIELD_NAME: 40,
'ratio': (13, 14),
'effective_date': (
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-22')
)
})
# No splits for a sid after the split-adjusted-asof-date.
sid_50_splits = pd.DataFrame({
SID_FIELD_NAME: 50,
'ratio': (15, 16),
'effective_date': (
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-14')
)
})
return pd.concat([
sid_0_splits,
sid_10_splits,
sid_20_splits,
sid_30_splits,
sid_40_splits,
sid_50_splits,
])
class PreviousWithSplitAdjustedWindows(WithSplitAdjustedWindows,
ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousSplitAdjustedEarningsEstimatesLoader(
events,
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=['estimate'],
split_adjusted_asof=cls.split_adjusted_asof_date,
)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat([
pd.concat([
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
# Undo all adjustments that haven't happened yet.
(30, 131*1/10, pd.Timestamp('2015-01-09')),
(40, 140., pd.Timestamp('2015-01-09')),
(50, 150 * 1 / 15 * 1 / 16, pd.Timestamp('2015-01-09')),
], end_date)
for end_date in pd.date_range('2015-01-09', '2015-01-12')
]),
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131, pd.Timestamp('2015-01-09')),
(40, 140., pd.Timestamp('2015-01-09')),
(50, 150. * 1 / 16, pd.Timestamp('2015-01-09')),
], pd.Timestamp('2015-01-13')),
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131, pd.Timestamp('2015-01-09')),
(40, 140., pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09'))
], pd.Timestamp('2015-01-14')),
pd.concat([
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131*11, pd.Timestamp('2015-01-09')),
(40, 140., pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09')),
], end_date)
for end_date in pd.date_range('2015-01-15', '2015-01-16')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, np.NaN, cls.window_test_start_date),
(20, 121*.7*.8, pd.Timestamp('2015-01-20')),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 140.*13, pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09'))],
end_date
) for end_date in pd.date_range('2015-01-20', '2015-01-21')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, 111*.3, pd.Timestamp('2015-01-22')),
(20, 121*.7*.8, pd.Timestamp('2015-01-20')),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 140.*13*14, pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09'))],
end_date
) for end_date in pd.date_range('2015-01-22', '2015-01-29')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101*7, pd.Timestamp('2015-01-20')),
(10, 111*.3, pd.Timestamp('2015-01-22')),
(20, 121*.7*.8*.9, pd.Timestamp('2015-01-20')),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 140.*13*14, pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09'))],
end_date
) for end_date in pd.date_range('2015-01-30', '2015-02-04')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101*7, pd.Timestamp('2015-01-20')),
(10, 311*.3, pd.Timestamp('2015-02-05')),
(20, 121*.7*.8*.9, pd.Timestamp('2015-01-20')),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 140.*13*14, pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09'))],
end_date
) for end_date in pd.date_range('2015-02-05', '2015-02-09')
]),
cls.create_expected_df_for_factor_compute(
[(0, 201, pd.Timestamp('2015-02-10')),
(10, 311*.3, pd.Timestamp('2015-02-05')),
(20, 221*.8*.9, pd.Timestamp('2015-02-10')),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 240.*13*14, pd.Timestamp('2015-02-10')),
(50, 250., pd.Timestamp('2015-02-10'))],
pd.Timestamp('2015-02-10')
),
])
twoq_previous = pd.concat(
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, np.NaN, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-09', '2015-01-19')] +
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131*11*12, pd.Timestamp('2015-01-20'))],
end_date
) for end_date in pd.date_range('2015-01-20', '2015-02-09')] +
            # We never get estimates for sid 10 for 2Q ago because once Q3
# becomes our previous quarter, 2Q ago would be Q2, and we have
# no data on it.
[cls.create_expected_df_for_factor_compute(
[(0, 101*7, pd.Timestamp('2015-02-10')),
(10, np.NaN, pd.Timestamp('2015-02-05')),
(20, 121*.7*.8*.9, pd.Timestamp('2015-02-10')),
(30, 131*11*12, pd.Timestamp('2015-01-20')),
(40, 140. * 13 * 14, pd.Timestamp('2015-02-10')),
(50, 150., pd.Timestamp('2015-02-10'))],
pd.Timestamp('2015-02-10')
)]
)
return {
1: oneq_previous,
2: twoq_previous
}
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousWithSplitAdjustedWindows(PreviousWithSplitAdjustedWindows):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousSplitAdjustedEstimatesLoader(
bz.data(events),
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=['estimate'],
split_adjusted_asof=cls.split_adjusted_asof_date,
)
class NextWithSplitAdjustedWindows(WithSplitAdjustedWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextSplitAdjustedEarningsEstimatesLoader(
events,
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=['estimate'],
split_adjusted_asof=cls.split_adjusted_asof_date,
)
@classmethod
def make_expected_timelines(cls):
oneq_next = pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 100*1/4, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(20, 120*5/3, cls.window_test_start_date),
(20, 121*5/3, pd.Timestamp('2015-01-07')),
(30, 130*1/10, cls.window_test_start_date),
(30, 131*1/10, pd.Timestamp('2015-01-09')),
(40, 140, pd.Timestamp('2015-01-09')),
(50, 150.*1/15*1/16, pd.Timestamp('2015-01-09'))],
pd.Timestamp('2015-01-09')
),
cls.create_expected_df_for_factor_compute(
[(0, 100*1/4, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120*5/3, cls.window_test_start_date),
(20, 121*5/3, pd.Timestamp('2015-01-07')),
(30, 230*1/10, cls.window_test_start_date),
(40, np.NaN, pd.Timestamp('2015-01-10')),
(50, 250.*1/15*1/16, pd.Timestamp('2015-01-12'))],
pd.Timestamp('2015-01-12')
),
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07')),
(30, 230, cls.window_test_start_date),
(40, np.NaN, pd.Timestamp('2015-01-10')),
(50, 250.*1/16, pd.Timestamp('2015-01-12'))],
pd.Timestamp('2015-01-13')
),
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07')),
(30, 230, cls.window_test_start_date),
(40, np.NaN, pd.Timestamp('2015-01-10')),
(50, 250., pd.Timestamp('2015-01-12'))],
pd.Timestamp('2015-01-14')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 100*5, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120*.7, cls.window_test_start_date),
(20, 121*.7, pd.Timestamp('2015-01-07')),
(30, 230*11, cls.window_test_start_date),
(40, 240, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
end_date
) for end_date in pd.date_range('2015-01-15', '2015-01-16')
]),
cls.create_expected_df_for_factor_compute(
[(0, 100*5*6, cls.window_test_start_date),
(0, 101, pd.Timestamp('2015-01-20')),
(10, 110*.3, pd.Timestamp('2015-01-09')),
(10, 111*.3, pd.Timestamp('2015-01-12')),
(20, 120*.7*.8, cls.window_test_start_date),
(20, 121*.7*.8, pd.Timestamp('2015-01-07')),
(30, 230*11*12, cls.window_test_start_date),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 240*13, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
pd.Timestamp('2015-01-20')
),
cls.create_expected_df_for_factor_compute(
[(0, 200 * 5 * 6, pd.Timestamp('2015-01-12')),
(10, 110 * .3, pd.Timestamp('2015-01-09')),
(10, 111 * .3, pd.Timestamp('2015-01-12')),
(20, 220 * .7 * .8, cls.window_test_start_date),
(20, 221 * .8, pd.Timestamp('2015-01-17')),
(40, 240 * 13, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
pd.Timestamp('2015-01-21')
),
cls.create_expected_df_for_factor_compute(
[(0, 200 * 5 * 6, pd.Timestamp('2015-01-12')),
(10, 110 * .3, pd.Timestamp('2015-01-09')),
(10, 111 * .3, pd.Timestamp('2015-01-12')),
(20, 220 * .7 * .8, cls.window_test_start_date),
(20, 221 * .8, pd.Timestamp('2015-01-17')),
(40, 240 * 13 * 14, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
pd.Timestamp('2015-01-22')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200*5*6, pd.Timestamp('2015-01-12')),
(10, 310*.3, pd.Timestamp('2015-01-09')),
(10, 311*.3, pd.Timestamp('2015-01-15')),
(20, 220*.7*.8, cls.window_test_start_date),
(20, 221*.8, pd.Timestamp('2015-01-17')),
(40, 240 * 13 * 14, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
end_date
) for end_date in pd.date_range('2015-01-23', '2015-01-29')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200*5*6*7, pd.Timestamp('2015-01-12')),
(10, 310*.3, pd.Timestamp('2015-01-09')),
(10, 311*.3, pd.Timestamp('2015-01-15')),
(20, 220*.7*.8*.9, cls.window_test_start_date),
(20, 221*.8*.9, pd.Timestamp('2015-01-17')),
(40, 240 * 13 * 14, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
end_date
) for end_date in pd.date_range('2015-01-30', '2015-02-05')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200*5*6*7, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220*.7*.8*.9, cls.window_test_start_date),
(20, 221*.8*.9, pd.Timestamp('2015-01-17')),
(40, 240 * 13 * 14, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
end_date
) for end_date in pd.date_range('2015-02-06', '2015-02-09')
]),
cls.create_expected_df_for_factor_compute(
[(0, 200*5*6*7, pd.Timestamp('2015-01-12')),
(0, 201, pd.Timestamp('2015-02-10')),
(10, np.NaN, cls.window_test_start_date),
(20, 220*.7*.8*.9, cls.window_test_start_date),
             (20, 221*.8*.9, pd.Timestamp('2015-01-17')),
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import urllib
import os
from io import StringIO
from io import BytesIO
import csv
import numpy as np
from datetime import datetime
import matplotlib.pyplot as plt
import pandas as pd
import scipy.signal as signal
# In[2]:
os.system("curl https://raw.githubusercontent.com/ComputoCienciasUniandes/FISI2029-201910/master/Seccion_1/Fourier/Datos/transacciones2008.txt https://raw.githubusercontent.com/ComputoCienciasUniandes/FISI2029-201910/master/Seccion_1/Fourier/Datos/transacciones2009.txt https://raw.githubusercontent.com/ComputoCienciasUniandes/FISI2029-201910/master/Seccion_1/Fourier/Datos/transacciones2010.txt > archivo.txt")
datos= | pd.read_csv("archivo.txt", delimiter=";", header=None, decimal=",") | pandas.read_csv |
"""Create a mapping from 'clean title' to original title in a sqlite database.
----
Copyright 2019 Data Driven Empathy LLC
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute,
sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES
OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import sqlite3
import sys
import pandas
import data_util
NUM_ARGS = 1
USAGE_STR = 'python create_clean_title_bridge.py [path to db]'
def perform_operations(db_loc):
"""The DB location at which the table should be created.
Args:
db_loc: String path to the sqlite database on which to operate.
"""
data_loader = data_util.DataLoader(db_loc=db_loc)
output_frame_not_clean = data_loader.load_data(
output_col_title='newTitle',
output_col_description='newDescription'
)
output_frame = | pandas.DataFrame() | pandas.DataFrame |
import os
import yaml
import lasagne
import pandas as pd
import numpy as np
from network import Network
import architectures as arches
L = lasagne.layers
# loads data with names according to autoload_data.py
from autoload_data import *
# load specs for all networks
with open('arch_specs.yaml') as archfile:
    arch_dict = yaml.safe_load(archfile)
### Compiling results from saved parameters ###
def compute_pretrained_results(net, archname, idx, test_data, fake=False):
"""
Compute pre-tuning results for a given arch/network on appropriate test data
"""
Xt, yt = test_data
if fake:
fname = '{} {} split fake data.npz'
fname = fname.format('fake_' + archname, idx)
paramsdir = os.path.join(paramsdir_, 'fake_' + archname)
else:
fname = '{} {} split agg fit exp 1-4.npz'
fname = fname.format(archname.replace('_', ' '), idx)
paramsdir = os.path.join(paramsdir_, archname[:-1])
results_df = pd.DataFrame(index=np.arange(Xt.shape[0]), columns=[idx])
net.load_params(os.path.join(paramsdir, fname))
nlls = net.itemized_test_fn(Xt, yt)
predictions = net.output_fn(Xt)
results_df[idx] = nlls
return results_df, predictions
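# Hypothetical usage sketch (the arch name and split index are illustrative,
# not taken from the repo's configuration):
# >>> results_df, preds = compute_pretrained_results(net, 'some_arch', 0, (Xt, yt))
# ``results_df`` holds one per-trial NLL column named after the split index,
# and ``preds`` is the network's output for every test row.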
def compute_tuned_results(net, archname, idx, test_idx, test_data, df):
"""
Compute post-tuning results for a given architecture/network on appropriate
test data
"""
Xt, yt = test_data
group_idx = (test_idx - 1) % 5 # fix eventually to take df/groupidx/selection passed independently?
selection = df.loc[df['group']==(group_idx+1)].index.values
results_df = pd.DataFrame(index=np.arange(Xt.shape[0]), columns=[idx])
predictions_df = pd.DataFrame(index=selection, columns=np.arange(36))
fname = '{} {} agg fit exp 1-4 {} tune fit exp 0.npz'
fname = fname.format(archname.replace('_', ' '), idx, test_idx)
net.load_params(os.path.join(paramsdir_, archname[:-1], fname))
nlls = net.itemized_test_fn(Xt[selection, :, :, :], yt[selection])
predictions = net.output_fn(Xt[selection, :, :, :])
predictions_df.loc[selection, :] = predictions
results_df.loc[selection, idx] = nlls
return results_df, predictions_df
def compute_net_results(net, archname, test_data, df):
"""
For a given network, test on appropriate test data and return dataframes
with results and predictions (named obviously)
"""
pretrain_results = []
pretrain_predictions = []
tune_results = []
tune_predictions = []
for idx in range(5):
results_df, predictions_df = compute_pretrained_results(net, archname, idx, test_data)
pretrain_results.append(results_df)
pretrain_predictions.append(predictions_df)
pretrain_results = pd.concat(pretrain_results, axis=1)
for idx in range(5):
for test_idx in range(5):
results_df, predictions_df = compute_tuned_results(net, archname, idx, test_idx, test_data, df)
tune_results.append(results_df)
tune_predictions.append(predictions_df)
tune_results = | pd.concat(tune_results, axis=1, join='inner') | pandas.concat |
"""Helper functions to read and convert common
data formats."""
import pandas as pd
import numpy as np
import os
import logging
from functools import partial
from .format_checkers import _is_bed_row
def convert_bed_to_bedpe(input_file, target_file, halfwindowsize, chromsize_path):
"""Converts bedfile at inputFile to a bedpefile,
expanding the point of interest up- and downstream
by halfwindowsize basepairs.
Only intervals that fall within the bounds of
chromosomes are written out.
"""
# load input_file
input_frame = pd.read_csv(input_file, sep="\t", header=None)
# handle case that positions are specified in two columns
if (
len(input_frame.columns) > 2
): # assuming second and third column hold position info
input_frame = input_frame.rename(columns={0: "chrom", 1: "start", 2: "end"})
input_frame.loc[:, "pos"] = (input_frame["start"] + input_frame["end"]) // 2
temp_frame = input_frame[["chrom", "pos"]]
else: # assuming second column holds position info
input_frame = input_frame.rename(columns={0: "chrom", 1: "pos"})
temp_frame = input_frame
# stitch together output frame
left_pos = temp_frame["pos"] - halfwindowsize
right_pos = temp_frame["pos"] + halfwindowsize
half_frame = pd.DataFrame(
{"chrom": temp_frame["chrom"], "start1": left_pos, "end1": right_pos}
)
# filter by chromosome sizes
chrom_sizes = pd.read_csv(chromsize_path, sep="\t", header=None)
chrom_sizes.columns = ["chrom", "length"]
half_frame_chromo = pd.merge(half_frame, chrom_sizes, on="chrom")
# generate filter expression
retained_rows = (half_frame_chromo["start1"] > 0) & (
half_frame_chromo["end1"] < half_frame_chromo["length"]
)
# filter dataframe
filtered = half_frame_chromo.loc[retained_rows, :].drop(columns=["length"])
# select row_ids of the original bed-file that are retained
bed_row_index = np.arange(len(half_frame_chromo))[retained_rows]
# construct final dataframe and write it to file
final = pd.concat((filtered, filtered), axis=1)
# add bed_row_index as final column
final.loc[:, "bed_row_index"] = bed_row_index
final.to_csv(target_file, sep="\t", header=None, index=False)
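# Example call (file names are hypothetical):
#   convert_bed_to_bedpe("peaks.bed", "peaks.bedpe", halfwindowsize=50000,
#                        chromsize_path="hg19.chrom.sizes")
# Each retained row becomes a symmetric BEDPE interval of width
# 2 * halfwindowsize centred on the original position, with the source row
# index appended as the final column.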
def clean_bed(input_file, output_file):
"""
Loads in bedfile and removes headers.
"""
# first, read in the file
with open(input_file, "r") as f:
content = f.read()
lines = content.split("\n")
    # strip comment header lines
skipped_rows = 0
for line in lines:
        if line.startswith(("#", "track", "browser")):
skipped_rows += 1
continue
break
file_accumulator = []
# check whether next line contains column names -> first three columns will contain chrSomething number number
potential_header_line = lines[skipped_rows]
split_header = potential_header_line.split("\t")
if not _is_bed_row(split_header):
# header present
skipped_rows += 1
stripped_lines = lines[skipped_rows:]
for line in stripped_lines:
if line == "":
# Skip empty last line
continue
file_accumulator.append(line.split("\t"))
# construct dataframe and save
data = pd.DataFrame(file_accumulator)
data.to_csv(output_file, sep="\t", index=False, header=None)
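# Usage sketch with hypothetical paths: clean_bed("raw_peaks.bed", "clean_peaks.bed")
# drops leading "#", "track" and "browser" lines plus an optional column-name row,
# leaving only tab-separated data rows in the output file.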
def sort_bed(input_file, output_file, chromsizes):
"""Sorts entries in bedfile according to chromsizes and
writes it to a file. input_file, output_file and chromsizes
should be a string containing the path to the respective
files. Will filter chromosomes so that only ones in chromsizes
are retained."""
# create helper sort function
def chromo_sort_function(element, data_unsorted, chromsizes):
return chromsizes.index(data_unsorted.iloc[element, 0])
data_unsorted = | pd.read_csv(input_file, sep="\t", header=None, comment="#") | pandas.read_csv |
import pandas as pd
from functools import reduce
def load():
print("Cargando datos")
datos ={}
"""
Seguridad y convivencia
"""
datos['Convivencia'] = data_convivencia = pd.read_excel('./data/datos separados.xlsx', 'Indicadores de convivencia decr')
datos['Seguridad'] = data_seguridad = | pd.read_excel('./data/datos separados.xlsx', 'Indicadores de seguridad') | pandas.read_excel |
# -*- coding: utf-8 -*-
"""
Created on Wed May 24 16:15:24 2017
Sponsors Club messaging functions
@author: tkc
"""
import pandas as pd
import smtplib
import numpy as np
import datetime
import tkinter as tk
import glob
import re
import math
import textwrap
from tkinter import filedialog
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from pkg.SC_signup_functions import findcards
from openpyxl import load_workbook
import pkg.SC_config as cnf
def emailparent_tk(teams, season, year):
    ''' Interface for non-billing email messages to parents (non-generic)
Message types include:
recruit - specific inquiry about player from last year not yet signed up; needs signupfile w/ recruits tab
assign - notify of team assignment, optional recruit for short team, CYC card notify; teams/cards/mastersignups
missinguni - ask about missing uniforms; missingunifile
unireturn - generic instructions for uniform return; mastersignups w/ unis issued
askforcards - check for CYC card on file and ask
other -- Generic single all team+coaches message (can have $SCHOOL, $GRADERANGE,$COACHINFO, $SPORT, $PLAYERLIST)
8/9/17 works for team assignments
TODO test recruit, missing unis, unireturn
args:
teams - df w/ active teams
season -'Winter', 'Fall' or 'Spring'
year - starting sport year i.e. 2019 for 2019-20 school year
'''
#%%
# first print out existing info in various lines
root = tk.Tk()
root.title('Send e-mail to parents')
messageframe=tk.LabelFrame(root, text='Message options')
unifilename=tk.StringVar()
try:
unifiles=glob.glob('missingunilist*') # find most recent uniform file name
if len(unifiles)>1:
unifile=findrecentfile(unifiles) # return single most recent file
else:
unifile=unifiles[0]
# find most recent missing uni file name
unifilename.set(unifile)
except: # handle path error
unifilename.set('missingunilist.csv')
recruitbool=tk.BooleanVar() # optional recruiting for short teams
emailtitle=tk.StringVar() # e-mail title
mtype=tk.StringVar() # coach message type
messfile=tk.StringVar() # text of e-mail message
transmessfile=tk.StringVar() # text of e-mail message for transfers
extravar=tk.StringVar() # use depends on message type... normally filename
extraname=tk.StringVar() # name for additional text entry box (various uses mostly filenames)
extraname.set('Extra_file_name.txt') # default starting choice
choice=tk.StringVar() # test or send -mail
def chooseFile(txtmess, ftypes):
''' tkinter file chooser (passes message string for window and expected
file types as tuple e.g. ('TXT','*.txt')
'''
root=tk.Tk() # creates pop-up window
root.update() # necessary to close tk dialog after askopenfilename is finished
# tk dialog asks for a single station file
full_path = tk.filedialog.askopenfilename(title = txtmess, filetypes=[ ftypes] )
root.destroy() # closes pop up window
return full_path
def choose_message():
# choose existing message (.txt file)
root=tk.Tk() # creates pop-up window
root.update() # necessary to close tk dialog after askopenfilename is finished
# tk dialog asks for a single station file
full_path = tk.filedialog.askopenfilename(title = 'Choose message file', filetypes=[ ('TXT','*.txt')] )
root.destroy() # closes pop up window
return full_path
# Functions to enable/disable relevant checkboxes depending on radiobutton choice
def Assignopts():
''' Display relevant choices for team assignment notification/cyc card/ short team recruiting '''
recruitcheck.config(state=tk.NORMAL)
extraentry.config(state=tk.DISABLED)
extraname.set('n/a')
messfile.set('parent_team_assignment.txt')
transmessfile.set('parent_team_transfer.txt')
emailtitle.set('Fall $SPORT for $FIRST')
def Recruitopts():
''' Display relevant choices for specific player recruiting'''
recruitcheck.config(state=tk.NORMAL)
extraentry.config(state=tk.DISABLED)
messfile.set('player_recruiting.txt')
transmessfile.set('n/a')
extraname.set('n/a')
emailtitle.set('Cabrini-Soulard sports for $FIRST this fall?')
def Missingopts():
''' Display relevant choices for ask parent for missing uniforms '''
recruitcheck.config(state=tk.DISABLED)
extraentry.config(state=tk.NORMAL)
messfile.set('finish_me.txt')
transmessfile.set('n/a')
extraname.set('Missing uni file name')
extravar.set('missing_uni.csv')
# TODO look up most recent uni file?
emailtitle.set("Please return $FIRST's $SPORT uniform!")
def Schedopts():
''' Display relevant choices for sending schedules (game and practice) to parents '''
recruitcheck.config(state=tk.DISABLED)
# Used here for name of master file schedule
extraentry.config(state=tk.NORMAL)
messfile.set('parent_game_schedule.txt')
transmessfile.set('n/a')
extraname.set('Game schedule file')
extravar.set('Cabrini_2017_schedule.csv')
emailtitle.set("Game schedule for Cabrini $GRADERANGE $GENDER $SPORT")
def Cardopts():
''' Display relevant choices for asking parent for missing CYC cards '''
recruitcheck.config(state=tk.DISABLED)
# Used here for name of master file schedule
extraentry.config(state=tk.DISABLED)
messfile.set('CYCcard_needed.txt')
transmessfile.set('n/a')
extraname.set('')
extravar.set('')
emailtitle.set("CYC card needed for $FIRST")
def Otheropts():
''' Display relevant choices for other generic message to parents '''
recruitcheck.config(state=tk.DISABLED)
# Used here for name of master file schedule
extraentry.config(state=tk.NORMAL)
messfile.set('temp_message.txt')
transmessfile.set('n/a')
extraname.set('')
extravar.set('')
emailtitle.set("Message from Cabrini Sponsors Club")
def Allopts():
''' Display relevant choices for generic message to all sports parents '''
recruitcheck.config(state=tk.DISABLED)
extraentry.config(state=tk.NORMAL)
messfile.set('temp_message.txt')
transmessfile.set('n/a')
extraname.set('')
extravar.set('')
emailtitle.set("Message from Cabrini Sponsors Club")
# E-mail title and message file name
rownum=0
tk.Label(messageframe, text='Title for e-mail').grid(row=rownum, column=0)
titleentry=tk.Entry(messageframe, textvariable=emailtitle)
titleentry.config(width=50)
titleentry.grid(row=rownum, column=1)
rownum+=1
tk.Label(messageframe, text='messagefile').grid(row=rownum, column=0)
messentry=tk.Entry(messageframe, textvariable=messfile)
messentry.config(width=50)
messentry.grid(row=rownum, column=1)
rownum+=1
tk.Label(messageframe, text='Transfer messagefile').grid(row=rownum, column=0)
transmessentry=tk.Entry(messageframe, textvariable=transmessfile)
transmessentry.config(width=50)
transmessentry.grid(row=rownum, column=1)
rownum+=1
# Choose counts, deriv, both or peaks plot
tk.Radiobutton(messageframe, text='Team assignment', value='Assign', variable = mtype, command=Assignopts).grid(row=rownum, column=0)
tk.Radiobutton(messageframe, text='Recruit missing', value='Recruit', variable = mtype, command=Recruitopts).grid(row=rownum, column=1)
tk.Radiobutton(messageframe, text='Missing uni', value='Missing', variable = mtype, command=Missingopts).grid(row=rownum, column=2)
tk.Radiobutton(messageframe, text='Send schedule', value='Schedule', variable = mtype, command=Schedopts).grid(row=rownum, column=3)
rownum+=1
tk.Radiobutton(messageframe, text='Ask for cards', value='Cards', variable = mtype, command=Cardopts).grid(row=rownum, column=1)
tk.Radiobutton(messageframe, text='Other team message', value='Other', variable = mtype, command=Otheropts).grid(row=rownum, column=1)
tk.Radiobutton(messageframe, text='All sport parents', value='All', variable = mtype, command=Allopts).grid(row=rownum, column=2)
rownum+=1
tk.Label(messageframe, text=extraname.get()).grid(row=rownum, column=0)
extraentry=tk.Entry(messageframe, textvariable=extravar)
extraentry.grid(row=rownum, column=1)
# Extra file chooser button
# button arg includes file type extension .. get from messfile
try:
ft = extraname.get().split('.')[-1]
ftypes =("%s" %ft.upper(), "*.%s" %ft)
except:
ftypes =("CSV" , "*.*") # default to all files
    # wrap in a lambda so the file chooser opens on click instead of at widget creation
    d=tk.Button(messageframe, text='Choose file', command=lambda: chooseFile('Choose extra file', ftypes))
d.grid(row=rownum, column=2)
recruitcheck=tk.Checkbutton(messageframe, variable=recruitbool, text='Recruit more players for short teams?')
recruitcheck.grid(row=rownum, column=3) # can't do immediate grid or nonetype is returned
rownum+=1
messageframe.grid(row=0, column=0)
# Specific team selector section using checkboxes
teamframe=tk.LabelFrame(root, text='Team selector')
teamdict=shortnamedict(teams)
teamlist=[] # list of tk bools for each team
# Make set of bool/int variables for each team
for i, val in enumerate(teamdict):
teamlist.append(tk.IntVar())
if '#' not in val:
teamlist[i].set(1) # Cabrini teams checked by default
else:
teamlist[i].set(0) # transfer team
# make checkbuttons for each team
for i, val in enumerate(teamdict):
        thisrow=i%5+1+rownum # five checkboxes per column
thiscol=i//5
thisname=teamdict.get(val,'')
tk.Checkbutton(teamframe, text=thisname, variable=teamlist[i]).grid(row=thisrow, column=thiscol)
rownum+=math.ceil(len(teamlist)/5)+2
# Decision buttons bottom row
def chooseall(event):
''' Select all teams '''
for i, val in enumerate(teamdict):
teamlist[i].set(1)
def clearall(event):
''' deselect all teams '''
for i, val in enumerate(teamdict):
teamlist[i].set(0)
def abort(event):
choice.set('abort')
root.destroy()
def test(event):
choice.set('test')
root.destroy()
def KCtest(event):
choice.set('KCtest')
root.destroy()
def send(event):
choice.set('send')
root.destroy()
rownum+=1
d=tk.Button(teamframe, text='All teams')
d.bind('<Button-1>', chooseall)
d.grid(row=rownum, column=0)
d=tk.Button(teamframe, text='Clear teams')
d.bind('<Button-1>', clearall)
d.grid(row=rownum, column=1)
teamframe.grid(row=1, column=0)
choiceframe=tk.LabelFrame(root)
d=tk.Button(choiceframe, text='Abort')
d.bind('<Button-1>', abort)
d.grid(row=rownum, column=2)
d=tk.Button(choiceframe, text='Test')
d.bind('<Button-1>', test)
d.grid(row=rownum, column=3)
d=tk.Button(choiceframe, text='KCtest')
d.bind('<Button-1>', KCtest)
d.grid(row=rownum, column=4)
d=tk.Button(choiceframe, text='Send')
d.bind('<Button-1>', send)
d.grid(row=rownum, column=5)
choiceframe.grid(row=2, column=0)
root.mainloop()
#%%
mychoice=choice.get()
if mychoice!='abort':
kwargs={}
if mychoice=='KCtest':
# this is a true send test but only to me
kwargs.update({'KCtest':True})
mychoice='send'
kwargs.update({'choice':mychoice}) # test or send
emailtitle=emailtitle.get()
messagefile='messages\\'+messfile.get()
# Handle selection of team subsets
selteams=[]
for i, val in enumerate(teamdict):
if teamlist[i].get()==1:
selteams.append(val)
# Filter teams based on checkbox input
teams=teams[teams['Team'].isin(selteams)]
# drop duplicates in case of co-ed team (m and f entries)
teams=teams.drop_duplicates(['Team','Sport'])
# Now deal with the different types of messages
#%%
if mtype.get()=='Schedule':
# Send practice and game schedules
try:
sched=pd.read_csv(extravar.get())
except:
print('Problem opening schedule and other required files for sending game schedules')
fname=filedialog.askopenfilename(title='Select schedule file.')
sched=pd.read_csv(fname)
# fields=pd.read_excel(cnf._INPUT_DIR+'\\Teams_coaches.xlsx', sheetname='Fields')
fields=pd.read_csv(cnf._INPUT_DIR+'\\fields.csv')
Mastersignups = pd.read_csv(cnf._INPUT_DIR+'\\master_signups.csv', encoding='cp437')
#coaches=pd.read_excel('Teams_coaches.xlsx', sheetname='Coaches')
coaches=pd.read_csv(cnf._INPUT_DIR+'\\coaches.csv')
# INTERNAL TESTING
# Mastersignups=Mastersignups[Mastersignups['Last']=='Croat']
famcontact= pd.read_csv(cnf._INPUT_DIR+'\\family_contact.csv', encoding='cp437')
with open(messagefile, 'r') as file:
blankmess=file.read()
# open and send master CYC schedule
sendschedule(teams, sched, fields, Mastersignups, coaches, year, famcontact, emailtitle, blankmess, **kwargs)
if mtype.get()=='Recruit':
try:
famcontact= pd.read_csv(cnf._INPUT_DIR+'\\family_contact.csv', encoding='cp437')
except:
print('Problem loading family contacts')
try: # Recruits stored in CSV
Recruits=pd.read_csv(cnf._OUTPUT_DIR+'\\%s%s_recruits.csv' %(season, year))
print('Loaded possible recruits from csv file')
except:
fname=filedialog.askopenfilename(title='Select recruits file.')
if fname.endswith('.csv'): # final move is query for file
Recruits=pd.read_csv(fname)
else:
print('Recruits file needed in csv format.')
return
emailrecruits(Recruits, famcontact, emailtitle, messagefile, **kwargs)
if mtype.get()=='Assign':
# Notify parents needs teams, mastersignups, famcontacts
if recruitbool.get():
kwargs.update({'recruit':True})
try:
Mastersignups = pd.read_csv(cnf._INPUT_DIR+'\\master_signups.csv', encoding='cp437')
#coaches=pd.read_excel(cnf._INPUT_DIR+'\\Teams_coaches.xlsx', sheetname='Coaches')
coaches=pd.read_csv(cnf._INPUT_DIR+'\\coaches.csv', encoding='cp437')
# INTERNAL TESTING
# Mastersignups=Mastersignups[Mastersignups['Last']=='Croat']
famcontact= pd.read_csv(cnf._INPUT_DIR+'\\family_contact.csv', encoding='cp437')
with open(messagefile, 'r') as file:
blankmess=file.read()
tranmessagefile='messages\\'+transmessfile.get()
with open(tranmessagefile, 'r') as file:
blanktransmess=file.read()
except:
print('Problem loading mastersignups, famcontacts')
return
notifyfamilies(teams, Mastersignups, coaches, year, famcontact, emailtitle, blankmess, blanktransmess, **kwargs)
if mtype.get()=='Unis':
try:
missing=pd.read_csv(messfile.get(), encoding='cp437')
oldteams=pd.read_excel(cnf._INPUT_DIR+'\\Teams_coaches.xlsx', sheetname='Oldteams') # loads all old teams in list
kwargs.update({'oldteams':oldteams,'missing':missing})
except:
print('Problem loading missingunis, oldteams')
return
# TODO Finish ask for missing uniforms script
askforunis(teams, Mastersignups, year, famcontact, emailtitle, blankmess, **kwargs)
if mtype.get()=='Cards':
try:
famcontact= pd.read_csv(cnf._INPUT_DIR+'\\family_contact.csv', encoding='cp437')
Mastersignups = pd.read_csv(cnf._INPUT_DIR+'\\master_signups.csv', encoding='cp437')
with open(messagefile, 'r') as file:
blankmess=file.read()
except:
print('Problem loading famcontacts, mastersignups, or blank message')
return
# TODO Finish ask for missing uniforms script
askforcards(teams, Mastersignups, year, famcontact, emailtitle, blankmess, **kwargs)
if mtype.get()=='Other':
try:
famcontact= pd.read_csv(cnf._INPUT_DIR+'\\family_contact.csv', encoding='cp437')
Mastersignups = | pd.read_csv(cnf._INPUT_DIR+'\\master_signups.csv', encoding='cp437') | pandas.read_csv |
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.style
matplotlib.style.use('ggplot')
import pandas as pd
from petersburg import Graph
__author__ = 'willmcginnis'
if __name__ == '__main__':
g = Graph()
# necktie paradox
g.from_dict({
1: {'payoff': 0, 'after': []},
2: {'payoff': 0, 'after': [{'node_id': 1}]},
3: {'payoff': 0, 'after': [{'node_id': 1}]},
4: {'payoff': 100, 'after': [{'node_id': 2, 'cost': 0}, {'node_id': 3, 'cost': 0}]},
5: {'payoff': 50, 'after': [{'node_id': 3, 'cost': 0}, {'node_id': 2, 'cost': 0}]},
})
data_2 = []
data_3 = []
for iter in [5, 10, 50, 100, 500, 1000, 5000, 10000, 50000, 1000000]:
outcomes = g.get_options(iters=iter, extended_stats=True)
data_2.append([
outcomes[2]['count'],
outcomes[2]['mean'],
outcomes[2]['min'],
outcomes[2]['max']
])
data_3.append([
outcomes[3]['count'],
outcomes[3]['mean'],
outcomes[3]['min'],
outcomes[3]['max']
])
df = | pd.DataFrame(data_2, columns=['iters', 'outcome', 'min', 'max']) | pandas.DataFrame |
from numbers import Number
from typing import List
import functools
import re
import pandas as pd
import numpy as np
from string_grouper import StringGrouper, StringGrouperConfig
def field(field_name: str, weight=1.0, **kwargs):
'''
Function that returns a triple corresponding to a field:
field_name, weight, StringGrouperConfig(**kwargs)
:param field_name: str
:param weight: relative priority given to this field. Defaults to 1.0.
:param kwargs: keyword arguments to be passed to StringGrouper
'''
_ = StringGrouperConfig(**kwargs) # validate kwargs
return field_name, weight, kwargs
def field_pair(field_name1: str, field_name2: str, weight=1.0, **kwargs):
'''
    Function that returns a quadruple corresponding to a field-pair:
    (field_name1, field_name2, weight, kwargs), where the keyword arguments
    are first validated against StringGrouperConfig
:param field_name1: str
:param field_name2: str
:param weight: relative priority given to this field-pair. Defaults to
1.0.
:param kwargs: keyword arguments to be passed to StringGrouper
'''
_ = StringGrouperConfig(**kwargs) # validate kwargs
return field_name1, field_name2, weight, kwargs
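# Usage sketch, assuming a hypothetical DataFrame `companies` with columns
# "name" and "zip_code"; min_similarity is assumed to be a valid StringGrouper
# keyword passed through field(). field() is reused for the exactly-matched
# column, where only its name and weight are used. With weights 2.0 (fuzzy
# "name") and 1.0 (exact "zip_code"), a name similarity of 0.9 on a
# zip-matched pair yields a weighted mean score of
# (2.0 * 0.9 + 1.0 * 1.0) / 3.0 ≈ 0.93.
def _example_record_linkage(companies):
    return record_linkage(
        companies,
        fields_2b_matched_fuzzily=[field("name", weight=2.0, min_similarity=0.8)],
        fields_2b_matched_exactly=[field("zip_code", weight=1.0)],
    )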
def record_linkage(data_frames,
fields_2b_matched_fuzzily,
fields_2b_matched_exactly=None,
hierarchical=True,
**kwargs):
'''
Function that combines similarity-matching results of several fields of one
or two DataFrames and returns them in another DataFrame.
:param data_frames: either a pandas DataFrame or a list of two pandas
DataFrames.
:param fields_2b_matched_fuzzily: List of tuples. If data_frames is a
pandas DataFrame, then each tuple is a triple
(<field name>, <weight>, <field_kwargs>) which can be input using
utility function field(name, weight, **kwargs).
<field name> is the name of a field in data_frames which is to be
matched.
If data_frames is a list of two pandas DataFrames, then each tuple is
a quadruple
(<field name1>, <field name2>, <weight>, <field_kwargs>) which can be
input using utility function
field_pair(name1, name2, weight, **kwargs).
<field name1> is the name of a field in data_frame[0] which is to be
matched. <field name2> is the name of a field in data_frame[1] which
is to be matched.
<weight> is a number that defines the **relative** importance of the
field (or field-pair) to other fields (or field-pairs) -- the field's
(or field-pair's) contribution to the total similarity will be
weighted by this number.
<field_kwargs> is a python dict capturing any keyword arguments to be
passed to StringGrouper for this field (or field-pair).
:param fields_2b_matched_exactly: List of tuples. If data_frames is a
pandas DataFrame, then each tuple is a pair
(<field name>, <weight>) which can be input using
utility function field(name, weight).
<field name> is the name of a field in data_frames which is to be
matched.
If data_frames is a list of two pandas DataFrames, then each tuple is
a triple
(<field name1>, <field name2>, <weight>) which can be input using
utility function field_pair(name1, name2, weight).
<field name1> is the name of a field in data_frame[0] which is to be
matched. <field name2> is the name of a field in data_frame[1] which
is to be matched.
<weight> has the same meaning as in parameter
fields_2b_matched_fuzzily. Defaults to None.
:param hierarchical: bool. Determines if the output DataFrame will have a
hierarchical column-structure (True) or not (False). Defaults to True.
:param kwargs: keyword arguments to be passed to StringGrouper for all
"fields to be matched fuzzily". However, any keyword arguments already
given in fields_2b_matched_fuzzily will take precedence over those
given in kwargs.
:return: pandas.DataFrame containing matching results.
'''
def get_field1_names(fields_tuples):
return [n[0] for n in fields_tuples]
def get_field2_names(fields_tuples):
return [n[1] for n in fields_tuples]
def get_field_names(fields_tuples):
if isinstance(data_frames, list):
return [f'{n1}/{n2}' for n1, n2, _, _ in fields_tuples]
else:
return get_field1_names(fields_tuples)
def get_field_weights(field_tuples):
if isinstance(data_frames, list):
return [w for _, _, w, _ in field_tuples]
else:
return [w for _, w, _ in field_tuples]
def get_field_value_pairs(field1_names, field2_names, values):
if field2_names is None:
return [(n1, v) for n1, v in zip(field1_names, values)]
else:
return [(f'{n1}/{n2}', v)
for n1, n2, v in zip(field1_names, field2_names, values)]
def get_index_names(df):
empty_df = df.iloc[0:0]
return [field for field in empty_df.reset_index().columns
if field not in empty_df.columns]
def prepend(strings, prefix):
return [f'{prefix}{i}' for i in strings]
def horizontal_linkage(df1, df2,
match_indexes,
fuzzy_field_grouper_pairs,
fuzzy_field_names,
fuzzy_field_weights,
exact_field_value_pairs=None,
exact_field_weights=None,
hierarchical=True):
horizontal_merger_list = []
if df2 is None:
for field_name1, sg in fuzzy_field_grouper_pairs:
matches = sg.match_strings(df1[field_name1])
sg.clear_data()
matches.set_index(match_indexes, inplace=True)
matches = weed_out_trivial_matches(matches)
if hierarchical:
merger = matches[
[f'left_{field_name1}', 'similarity',
f'right_{field_name1}']]
merger.rename(
columns={
f'left_{field_name1}': 'left',
f'right_{field_name1}': 'right'},
inplace=True)
else:
merger = matches[['similarity']]
merger.rename(
columns={'similarity': field_name1},
inplace=True)
horizontal_merger_list += [merger]
else:
for field_name1, field_name2, sg in fuzzy_field_grouper_pairs:
matches = sg.match_strings(df1[field_name1], df2[field_name2])
sg.clear_data()
matches.set_index(match_indexes, inplace=True)
if hierarchical:
merger = matches[
[f'left_{field_name1}', 'similarity',
f'right_{field_name2}']]
merger.rename(
columns={
f'left_{field_name1}': 'left',
f'right_{field_name2}': 'right'},
inplace=True)
else:
merger = matches[['similarity']]
merger.rename(
columns={'similarity': f'{field_name1}/{field_name2}'},
inplace=True)
horizontal_merger_list += [merger]
key_list = None if not hierarchical else fuzzy_field_names
merged_df = pd.concat(
horizontal_merger_list,
axis=1,
keys=key_list,
join='inner')
title_exact = 'Exactly Matched Fields'
title_fuzzy = 'Fuzzily Matched Fields'
if exact_field_value_pairs:
exact_df = build_column_precursor_to(
merged_df,
exact_field_value_pairs)
merged_df = pd.concat(
[exact_df, merged_df],
axis=1,
keys=[title_exact, title_fuzzy],
join='inner')
totals = compute_totals(
merged_df,
fuzzy_field_names,
fuzzy_field_weights,
exact_field_weights,
hierarchical,
exact_field_value_pairs,
title_fuzzy)
return pd.concat([totals, merged_df], axis=1)
def weed_out_trivial_matches(matches):
num_indexes = matches.index.nlevels//2
return matches[functools.reduce(
lambda a, b: a | b,
[(matches.index.get_level_values(i) !=
matches.index.get_level_values(i + num_indexes))
for i in range(num_indexes)])]
def build_column_precursor_to(df, exact_field_value_pairs):
exact_df = df.iloc[:, 0:0]
exact_df = exact_df.assign(
**{field_name: field_value
for field_name, field_value in exact_field_value_pairs})
return exact_df
def compute_totals(merged_df,
fuzzy_field_names,
fuzzy_field_weights,
exact_field_weights,
hierarchical,
exact_field_value_pairs,
title_fuzzy):
title_total = 'Weighted Mean Similarity Score'
fuzzy_weight_array = np.array(fuzzy_field_weights, dtype=float)
if exact_field_value_pairs:
exact_weight_array = np.array(exact_field_weights, dtype=float)
total = fuzzy_weight_array.sum() + exact_weight_array.sum()
fuzzy_weight_array /= total
exact_field_contribution = (exact_weight_array/total).sum()
if hierarchical:
totals = (merged_df[[(title_fuzzy, field, 'similarity')
for field in fuzzy_field_names]]
.dot(fuzzy_weight_array)) + exact_field_contribution
totals = pd.concat([totals], axis=1,
keys=[('', '', title_total)])
else:
totals = (merged_df[[(title_fuzzy, field)
for field in fuzzy_field_names]]
.dot(fuzzy_weight_array)) + exact_field_contribution
totals = | pd.concat([totals], axis=1, keys=[('', title_total)]) | pandas.concat |
"""Corpora dataset exporter"""
import csv
import hashlib
import logging
import os
import boto3
import pandas
import sqlalchemy
from voice_corpora_automation import config, queries
logging.basicConfig(level=logging.DEBUG)
LOGGER = logging.getLogger(__name__)
class DatasetExporter:
"""Exporter of the dataset from CV (excluding clips)"""
def __init__(self, query=queries.FULL_DATASET_QUERY):
self.cv_engine = sqlalchemy.create_engine(config.CV_DATABASE_URL, echo=True)
self.corpora_engine = sqlalchemy.create_engine(
config.CORPORA_DATABASE_URL, echo=True
)
self.query = query
self.dataframe = None
self.diff = None
def load_data(self):
"""Read SQL data from CV database to a pandas DataFrame"""
LOGGER.info("Reading data from CV database;")
self.dataframe = pandas.read_sql(self.query, self.cv_engine)
return self.dataframe
def prepare_diff(self):
"""Filter out entries already existing in a previous version"""
LOGGER.info("Prepare diff for corpora")
try:
LOGGER.info("Calculating diff for the new version")
current_corpora = pandas.read_sql(
config.CORPORA_DATABASE_TABLE, self.corpora_engine
)
self.diff = self.dataframe[~self.dataframe.path.isin(current_corpora.path)]
        except Exception:
            LOGGER.info("Something went wrong, falling back to using the whole dataset")
self.diff = self.dataframe
def process_data(self):
"""Preprocess `client_id`, `path`, `sentence`"""
LOGGER.info("Processing fields")
hasher = lambda x: hashlib.sha512(x.encode("utf-8")).hexdigest()
renamer = lambda x: f"common_voice_{x.locale}_{x.id}.mp3"
self.dataframe["cv_path"] = self.dataframe["path"]
self.dataframe["client_id"] = self.dataframe["client_id"].apply(hasher)
self.dataframe["path"] = self.dataframe.apply(renamer, axis=1)
self.dataframe["sentence"] = self.dataframe["sentence"].str.replace("\r", " ")
        self.dataframe = self.dataframe.rename(columns={"client_id": "hashed_client_id"})
def export_diff(self):
"""Export diff to TSV format"""
LOGGER.info("Store dataset as TSV")
path = os.path.join(config.CV_EXPORT_DIR, config.CV_EXPORT_FILENAME)
cols = [col for col in self.diff.columns if col != "cv_path"]
self.diff[cols].to_csv(
path,
sep="\t",
index=False,
quoting=csv.QUOTE_NONE,
encoding="utf-8",
escapechar="\\",
)
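# Illustration of what process_data() produces for a single made-up row:
# hashlib.sha512("some-client-id".encode("utf-8")).hexdigest() replaces the raw
# client_id with a 128-character hex digest, and a row with locale "en" and
# id 12345 gets path "common_voice_en_12345.mp3", while the original clip path
# is preserved in the cv_path column.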
class DatasetUploader:
"""Uploader of the complete dataset from CV"""
def __init__(self, original_dataset):
self.engine = sqlalchemy.create_engine(config.CORPORA_DATABASE_URL, echo=True)
self.corpora_path = config.CORPORA_EXPORT_DIR
self.original_dataset = original_dataset
self.dataframe = None
def load(self):
"""Load the output of the corpora creator to a TSV"""
partitions = ["dev", "invalidated", "other", "test", "train", "validated"]
self.dataframe = pandas.DataFrame()
LOGGER.info("Loading corpora")
for locale in os.listdir(self.corpora_path):
for part in partitions:
LOGGER.info("Locale: %s", locale)
LOGGER.info("Part: %s", part)
path = os.path.join(self.corpora_path, locale, f"{part}.tsv")
partial_df = pandas.read_csv(path, sep="\t")
partial_df["locale"] = locale
partial_df["partition"] = part
partial_df["timestamp"] = config.TIMESTAMP
self.dataframe = | pandas.concat([self.dataframe, partial_df]) | pandas.concat |
#!/usr/bin/env python
# coding: utf-8
# In[2]:
import sys
sys.path.append('..')
# In[3]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from datetime import timedelta, datetime, date
import os
from utils import data_paths, load_config
from pathlib import Path
from nltk.metrics import edit_distance #(Levenshtein)
import pycountry
import math
# # Estimating The Infected Population From Deaths
# > Estimating the number of infected people by country based on the number of deaths and case fatality rate.
#
# - comments: true
# - author: <NAME>
# - categories: [growth, compare, interactive, estimation]
# - hide: false
# - image: images/covid-estimate-infections.png
# - permalink: /covid-infected/
# - toc: true
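# Worked example of the back-calculation used below (illustrative numbers only):
# with 120 cumulative deaths reported today and an observed case fatality rate
# CFR = 0.04, roughly 120 / 0.04 = 3000 people were already infected about
# 8 days earlier (8 days is the confirmation-to-death lag assumed in the loops
# further down via np.timedelta64(8, 'D')).
example_cfr_implied_infected = 120 / 0.04  # ≈ 3000 infections roughly 8 days before today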
# In[4]:
LOCAL_FILES=True
#jupyter or script
IS_SCRIPT = False
# In[5]:
os.getcwd()
# In[6]:
if IS_SCRIPT:
RUN_PATH = Path(os.path.realpath(__file__))
DATA_PARENT = RUN_PATH.parent.parent
else:
#for jupyter
cw = get_ipython().getoutput('pwd')
RUN_PATH = Path(cw[0])
DATA_PARENT = RUN_PATH.parent
# In[7]:
if IS_SCRIPT:
csse_data = data_paths('tools/csse_data_paths.yml')
else:
csse_data = data_paths('csse_data_paths.yml')
# In[8]:
if LOCAL_FILES:
confirmed_url=csse_data.get("csse_ts_local", {}).get('confirmed', {})
deaths_url=csse_data.get("csse_ts_local", {}).get('deaths', {})
recovered_url=csse_data.get("csse_ts_local", {}).get('recovered', {})
confirmed_url = str(DATA_PARENT/confirmed_url)
deaths_url = str(DATA_PARENT/deaths_url)
recovered_url = str(DATA_PARENT/recovered_url)
else:
confirmed_url=csse_data.get("csse_ts_global", {}).get('confirmed', {})
deaths_url=csse_data.get("csse_ts_global", {}).get('deaths', {})
recovered_url=csse_data.get("csse_ts_global", {}).get('recovered', {})
# In[9]:
### UN stats
# In[10]:
df_un_pop_density_info=pd.read_csv(DATA_PARENT/'data/un/df_un_pop_density_info.csv')
df_un_urban_growth_info=pd.read_csv(DATA_PARENT/'data/un/urban_growth_info.csv')
df_un_health_info=pd.read_csv(DATA_PARENT/'data/un/df_un_health_info.csv')
df_un_tourism_info=pd.read_csv(DATA_PARENT/'data/un/df_un_tourism_info.csv')
df_un_gdp_info=pd.read_csv(DATA_PARENT/'data/un/df_un_gdp_info.csv')
df_un_edu_info=pd.read_csv(DATA_PARENT/'data/un/df_un_edu_info.csv')
df_un_pop_growth_info=pd.read_csv(DATA_PARENT/'data/un/df_un_pop_growth_info.csv')
df_un_gdrp_rnd_info=pd.read_csv(DATA_PARENT/'data/un/df_un_gdrp_rnd_info.csv')
df_un_education_info=pd.read_csv(DATA_PARENT/'data/un/df_un_education_info.csv')
df_un_sanitation_info=pd.read_csv(DATA_PARENT/'data/un/df_un_sanitation_info.csv')
df_un_health_expenditure_info=pd.read_csv(DATA_PARENT/'data/un/df_un_health_expenditure_info.csv')
df_un_immigration_info=pd.read_csv(DATA_PARENT/'data/un/df_un_immigration_info.csv')
df_un_trading_info=pd.read_csv(DATA_PARENT/'data/un/df_un_trading_info.csv')
df_un_land_info=pd.read_csv(DATA_PARENT/'data/un/df_un_land_info.csv')
# In[11]:
df_un_health_info.head()
#Health personnel: Pharmacists (per 1000 population)
# In[12]:
df_un_trading_info.tail(n=20)
#columns of interest:
#Major trading partner 1 (% of exports)
#Major trading partner 2 (% of exports)
#Major trading partner 3 (% of exports)
# In[13]:
df_population_density=df_un_pop_density_info.loc[df_un_pop_density_info['Series'] == 'Population density']
# In[14]:
df_population_density.tail(n=50)
#Population aged 60+ years old (percentage)
#Population density
#Population mid-year estimates (millions)
# In[15]:
df_population_density.loc[df_population_density.groupby('Country')['Year'].idxmax()]
# In[16]:
df_population_density
# In[17]:
### Freedom House stats
# In[18]:
#Freedon House stats
def country_freedom():
global_freedom = str(DATA_PARENT/'data/freedom_house/Global_Freedom.csv')
df_global_free = pd.read_csv(global_freedom)
internet_freedom = str(DATA_PARENT/'data/freedom_house/Internet_Freedom.csv')
df_internet_free = pd.read_csv(internet_freedom)
return df_global_free, df_internet_free
df_global_freedom, df_internet_freedom = country_freedom()
# In[19]:
#csse countries
df_deaths = pd.read_csv(deaths_url, error_bad_lines=False)
df_confirmed = pd.read_csv(confirmed_url, error_bad_lines=False)
df_recovered = pd.read_csv(recovered_url, error_bad_lines=False)
csse_countries = []
for df in [df_deaths, df_confirmed, df_recovered]:
c = set(df["Country/Region"].unique())
csse_countries.append(c)
csse_countries = [item for sublist in csse_countries for item in sublist]
csse_countries = list(set(csse_countries))
# ## CSSE
# In[20]:
# Get data on deaths D_t
df_deaths = pd.read_csv(deaths_url, error_bad_lines=False)
df_deaths = df_deaths.drop(columns=["Lat", "Long"])
df_deaths = df_deaths.melt(id_vars= ["Province/State", "Country/Region"])
df_deaths = pd.DataFrame(df_deaths.groupby(['Country/Region', "variable"]).sum())
df_deaths.reset_index(inplace=True)
df_deaths = df_deaths.rename(columns={"Country/Region": "location", "variable": "date", "value": "total_deaths"})
df_deaths['date'] =pd.to_datetime(df_deaths.date)
df_deaths = df_deaths.sort_values(by = "date")
df_deaths.loc[df_deaths.location == "US","location"] = "United States"
df_deaths.loc[df_deaths.location == "Korea, South","location"] = "South Korea"
# In[21]:
#confirmed
# In[22]:
df_confirmed = pd.read_csv(confirmed_url, error_bad_lines=False)
df_confirmed = df_confirmed.drop(columns=["Lat", "Long"])
df_confirmed = df_confirmed.melt(id_vars= ["Province/State", "Country/Region"])
df_confirmed = pd.DataFrame(df_confirmed.groupby(['Country/Region', "variable"]).sum())
df_confirmed.reset_index(inplace=True)
df_confirmed = df_confirmed.rename(columns={"Country/Region": "location", "variable": "date", "value": "total_cases"})
df_confirmed['date'] =pd.to_datetime(df_confirmed.date)
df_confirmed = df_confirmed.sort_values(by = "date")
df_confirmed.loc[df_confirmed.location == "US","location"] = "United States"
df_confirmed.loc[df_confirmed.location == "Korea, South","location"] = "South Korea"
# In[23]:
df_confirmed.head()
# In[24]:
df_final = pd.merge(df_deaths,
df_confirmed)
# In[25]:
df_final.head()
# In[26]:
df_final["CFR"] = df_final["total_deaths"]/df_final["total_cases"]
df_final["total_infected"] = np.NaN
df_final = df_final.sort_values(by = ['location', 'date'])
df_final = df_final.reset_index(drop = True)
# In[27]:
df_un_pop_per_country=pd.read_csv(DATA_PARENT/'data/un/df_un_pop_per_country_info.csv')
# In[28]:
def get_country_list(pop_cutoff=5.0):
pop_nmill=df_un_pop_per_country.loc[df_un_pop_per_country['Value'] >= pop_cutoff]
countries_n_plus=pop_nmill.Country.tolist()
return countries_n_plus
# In[29]:
csse_countries.sort()
csse_countries
# In[30]:
csse_countries=list(map(lambda x: x if x != 'Korea, South' else "South Korea", csse_countries))
# In[31]:
countries_n_plus = get_country_list(pop_cutoff=5.0)
# In[32]:
for j in countries_n_plus:
if not j in csse_countries:
print(j)
# In[33]:
for j in countries_n_plus:
for i in df_final["date"].unique()[0:-8]:
numer = df_final.loc[(df_final.date == i + np.timedelta64(8, 'D')) & (df_final.location == j), "total_deaths"].iloc[0]
denom = df_final.loc[(df_final.date == i + np.timedelta64(8, 'D')) & (df_final.location == j), "CFR"].iloc[0]
df_final.loc[(df_final.date == i) & (df_final.location == j), "total_infected"] = numer/denom
# In[34]:
df_final.head()
# In[35]:
# Estimate growth rate of infected, g
df_final['infected_g'] = np.log(df_final['total_infected'])
df_final['infected_g'] = df_final['infected_g'].diff()
# In[36]:
# Estimate number of infected given g
today = df_final.date.iloc[-1]
for j in countries_n_plus:
for i in range(7,-1,-1):
df_final.loc[(df_final.location == j) & (df_final.date == today - timedelta(i)), "total_infected"] = df_final.loc[df_final.location == j, "total_infected"].iloc[-i-2]*(1+df_final.loc[df_final.location == j, "infected_g"].aggregate(func = "mean"))
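# Worked example of the extrapolation above (illustrative numbers only): with a
# mean daily log-growth g = 0.10, the code uses (1 + g) = 1.10 as the daily
# growth factor, so an estimate of 3000 infected on the last back-calculable day
# becomes about 3000 * 1.10 = 3300 the next day, 3300 * 1.10 = 3630 the day
# after, and so on across the final 8 days that cannot yet be inferred from deaths.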
# In[37]:
data_pc = df_final[['location', 'date', 'total_infected']].copy()
# In[38]:
data_countries = []
data_countries_pc = []
# In[39]:
for i in countries_n_plus:
data_pc.loc[data_pc.location == i,"total_infected"] = data_pc.loc[data_pc.location == i,"total_infected"]
# In[40]:
# Get each country time series
filter1 = data_pc["total_infected"] > 1
for i in countries_n_plus:
filter_country = data_pc["location"]== i
data_countries_pc.append(data_pc[filter_country & filter1])
# In[41]:
len(data_countries_pc)
# In[42]:
data_countries_pc[0]
# ## Estimated Infected Population By Country
#
# by days since outbreak
# In[43]:
# Latest Country Estimates
label = 'Total_Infected'
temp = pd.concat([x.copy() for x in data_countries_pc]).loc[lambda x: x.date >= '3/1/2020']
# In[44]:
metric_name = f'{label}'
temp.columns = ['Country', 'Date', metric_name]
# temp.loc[:, 'month'] = temp.date.dt.strftime('%Y-%m')
temp.loc[:, "Total_Infected"] = temp.loc[:, "Total_Infected"].round(0)
temp.groupby('Country').last()
# In[ ]:
# ## Infected vs. number of confirmed cases
# > Allows you to compare how countries have been tracking the true number of infected people.
# The smaller the deviation from the dashed 45-degree line, the better the country is tracking the true number of infected people.
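# Illustrative reading (made-up numbers): a country with 3000 estimated
# infections but only 300 confirmed cases sits well below the 45-degree line,
# i.e. it has confirmed roughly 300 / 3000 = 10% of its estimated infections.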
# In[45]:
data_pc = df_final.copy()
# In[46]:
data_countries = []
data_countries_pc = []
# In[47]:
for i in countries_n_plus:
data_pc.loc[data_pc.location == i,"total_infected"] = data_pc.loc[data_pc.location == i,"total_infected"]
data_pc.loc[data_pc.location == i,"total_cases"] = data_pc.loc[data_pc.location == i,"total_cases"]
# get each country time series
filter1 = data_pc["total_infected"] > 1
for i in countries_n_plus:
filter_country = data_pc["location"]== i
data_countries_pc.append(data_pc[filter_country & filter1])
# In[48]:
type(data_countries_pc[0])
# In[49]:
data_countries_pc[0]
# In[ ]:
# In[50]:
def get_df_country(country):
for i, df in enumerate(data_countries_pc):
if len(df.loc[df['location'] == country]):
print(f'country: {country}, index: {i}')
# In[51]:
get_df_country('Italy')
# In[52]:
data_countries_pc[47]
# In[79]:
df_all_data_countries_pc=pd.concat(data_countries_pc)
# In[81]:
df_all_data_countries_pc.tail()
# In[ ]:
#### save all pred as one df
# In[82]:
df_all_data_countries_pc.to_csv(DATA_PARENT/'data/processed/csse/df_all_data_countries_pc.csv')
# In[ ]:
# In[ ]:
### Combine last day only pred with un and freedom house data
# In[53]:
df_country_un_stats = pd.read_csv(DATA_PARENT/'data/un/df_un_merged_stats.csv')
# In[60]:
df_country_un_stats.rename(columns={'Country': 'location'}, inplace=True)
# In[61]:
idx = data_countries_pc[0].groupby(['location'])['date'].transform(max) == data_countries_pc[0]['date']
sub_df=data_countries_pc[0][idx]
sub_df
# In[62]:
sub_df.iloc[0]['location']
# In[63]:
df_country_un_stats.head()
# In[ ]:
# In[ ]:
### freedom house
# In[72]:
df_freedomhouse_merged = pd.read_csv(DATA_PARENT/'data/freedom_house/df_freedomhouse_merged.csv')
# In[73]:
df_freedomhouse_merged.head()
# In[74]:
df_freedomhouse_merged.rename(columns={'Country': 'location'}, inplace=True)
# In[76]:
frames=[]
for df in data_countries_pc:
idx = df.groupby(['location'])['date'].transform(max) == df['date']
sub_df=df[idx]
if len(sub_df)>0:
#print(f'sub_df: {sub_df}')
country=sub_df.iloc[0]['location']
un_df=df_country_un_stats.loc[df_country_un_stats['location'] == country]
#print(f'un_df: {un_df}')
df_merged= | pd.merge(sub_df, un_df) | pandas.merge |
import pandas as pd
from pandas import Period, offsets
from pandas.util import testing as tm
from pandas.tseries.frequencies import _period_code_map
class TestFreqConversion(tm.TestCase):
"Test frequency conversion of date objects"
def test_asfreq_corner(self):
val = Period(freq='A', year=2007)
result1 = val.asfreq('5t')
result2 = val.asfreq('t')
expected = Period('2007-12-31 23:59', freq='t')
self.assertEqual(result1.ordinal, expected.ordinal)
self.assertEqual(result1.freqstr, '5T')
self.assertEqual(result2.ordinal, expected.ordinal)
self.assertEqual(result2.freqstr, 'T')
def test_conv_annual(self):
# frequency conversion tests: from Annual Frequency
ival_A = Period(freq='A', year=2007)
ival_AJAN = Period(freq="A-JAN", year=2007)
ival_AJUN = Period(freq="A-JUN", year=2007)
ival_ANOV = Period(freq="A-NOV", year=2007)
ival_A_to_Q_start = Period(freq='Q', year=2007, quarter=1)
ival_A_to_Q_end = Period(freq='Q', year=2007, quarter=4)
ival_A_to_M_start = Period(freq='M', year=2007, month=1)
ival_A_to_M_end = Period(freq='M', year=2007, month=12)
ival_A_to_W_start = Period(freq='W', year=2007, month=1, day=1)
ival_A_to_W_end = Period(freq='W', year=2007, month=12, day=31)
ival_A_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_A_to_B_end = Period(freq='B', year=2007, month=12, day=31)
ival_A_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_A_to_D_end = Period(freq='D', year=2007, month=12, day=31)
ival_A_to_H_start = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_A_to_H_end = Period(freq='H', year=2007, month=12, day=31,
hour=23)
ival_A_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_A_to_T_end = Period(freq='Min', year=2007, month=12, day=31,
hour=23, minute=59)
ival_A_to_S_start = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=0, second=0)
ival_A_to_S_end = Period(freq='S', year=2007, month=12, day=31,
hour=23, minute=59, second=59)
ival_AJAN_to_D_end = Period(freq='D', year=2007, month=1, day=31)
ival_AJAN_to_D_start = Period(freq='D', year=2006, month=2, day=1)
ival_AJUN_to_D_end = Period(freq='D', year=2007, month=6, day=30)
ival_AJUN_to_D_start = Period(freq='D', year=2006, month=7, day=1)
ival_ANOV_to_D_end = Period(freq='D', year=2007, month=11, day=30)
ival_ANOV_to_D_start = Period(freq='D', year=2006, month=12, day=1)
self.assertEqual(ival_A.asfreq('Q', 'S'), ival_A_to_Q_start)
self.assertEqual(ival_A.asfreq('Q', 'e'), ival_A_to_Q_end)
self.assertEqual(ival_A.asfreq('M', 's'), ival_A_to_M_start)
self.assertEqual(ival_A.asfreq('M', 'E'), ival_A_to_M_end)
self.assertEqual(ival_A.asfreq('W', 'S'), ival_A_to_W_start)
self.assertEqual(ival_A.asfreq('W', 'E'), ival_A_to_W_end)
self.assertEqual(ival_A.asfreq('B', 'S'), ival_A_to_B_start)
self.assertEqual(ival_A.asfreq('B', 'E'), ival_A_to_B_end)
self.assertEqual(ival_A.asfreq('D', 'S'), ival_A_to_D_start)
self.assertEqual(ival_A.asfreq('D', 'E'), ival_A_to_D_end)
self.assertEqual(ival_A.asfreq('H', 'S'), ival_A_to_H_start)
self.assertEqual(ival_A.asfreq('H', 'E'), ival_A_to_H_end)
self.assertEqual(ival_A.asfreq('min', 'S'), ival_A_to_T_start)
self.assertEqual(ival_A.asfreq('min', 'E'), ival_A_to_T_end)
self.assertEqual(ival_A.asfreq('T', 'S'), ival_A_to_T_start)
self.assertEqual(ival_A.asfreq('T', 'E'), ival_A_to_T_end)
self.assertEqual(ival_A.asfreq('S', 'S'), ival_A_to_S_start)
self.assertEqual(ival_A.asfreq('S', 'E'), ival_A_to_S_end)
self.assertEqual(ival_AJAN.asfreq('D', 'S'), ival_AJAN_to_D_start)
self.assertEqual(ival_AJAN.asfreq('D', 'E'), ival_AJAN_to_D_end)
self.assertEqual(ival_AJUN.asfreq('D', 'S'), ival_AJUN_to_D_start)
self.assertEqual(ival_AJUN.asfreq('D', 'E'), ival_AJUN_to_D_end)
self.assertEqual(ival_ANOV.asfreq('D', 'S'), ival_ANOV_to_D_start)
self.assertEqual(ival_ANOV.asfreq('D', 'E'), ival_ANOV_to_D_end)
self.assertEqual(ival_A.asfreq('A'), ival_A)
def test_conv_quarterly(self):
# frequency conversion tests: from Quarterly Frequency
ival_Q = Period(freq='Q', year=2007, quarter=1)
ival_Q_end_of_year = Period(freq='Q', year=2007, quarter=4)
ival_QEJAN = Period(freq="Q-JAN", year=2007, quarter=1)
ival_QEJUN = Period(freq="Q-JUN", year=2007, quarter=1)
ival_Q_to_A = Period(freq='A', year=2007)
ival_Q_to_M_start = Period(freq='M', year=2007, month=1)
ival_Q_to_M_end = Period(freq='M', year=2007, month=3)
ival_Q_to_W_start = Period(freq='W', year=2007, month=1, day=1)
ival_Q_to_W_end = Period(freq='W', year=2007, month=3, day=31)
ival_Q_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_Q_to_B_end = Period(freq='B', year=2007, month=3, day=30)
ival_Q_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_Q_to_D_end = Period(freq='D', year=2007, month=3, day=31)
ival_Q_to_H_start = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_Q_to_H_end = Period(freq='H', year=2007, month=3, day=31, hour=23)
ival_Q_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_Q_to_T_end = Period(freq='Min', year=2007, month=3, day=31,
hour=23, minute=59)
ival_Q_to_S_start = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=0, second=0)
ival_Q_to_S_end = Period(freq='S', year=2007, month=3, day=31, hour=23,
minute=59, second=59)
ival_QEJAN_to_D_start = Period(freq='D', year=2006, month=2, day=1)
ival_QEJAN_to_D_end = Period(freq='D', year=2006, month=4, day=30)
ival_QEJUN_to_D_start = Period(freq='D', year=2006, month=7, day=1)
ival_QEJUN_to_D_end = Period(freq='D', year=2006, month=9, day=30)
self.assertEqual(ival_Q.asfreq('A'), ival_Q_to_A)
self.assertEqual(ival_Q_end_of_year.asfreq('A'), ival_Q_to_A)
self.assertEqual(ival_Q.asfreq('M', 'S'), ival_Q_to_M_start)
self.assertEqual(ival_Q.asfreq('M', 'E'), ival_Q_to_M_end)
self.assertEqual(ival_Q.asfreq('W', 'S'), ival_Q_to_W_start)
self.assertEqual(ival_Q.asfreq('W', 'E'), ival_Q_to_W_end)
self.assertEqual(ival_Q.asfreq('B', 'S'), ival_Q_to_B_start)
self.assertEqual(ival_Q.asfreq('B', 'E'), ival_Q_to_B_end)
self.assertEqual(ival_Q.asfreq('D', 'S'), ival_Q_to_D_start)
self.assertEqual(ival_Q.asfreq('D', 'E'), ival_Q_to_D_end)
self.assertEqual(ival_Q.asfreq('H', 'S'), ival_Q_to_H_start)
self.assertEqual(ival_Q.asfreq('H', 'E'), ival_Q_to_H_end)
self.assertEqual(ival_Q.asfreq('Min', 'S'), ival_Q_to_T_start)
self.assertEqual(ival_Q.asfreq('Min', 'E'), ival_Q_to_T_end)
self.assertEqual(ival_Q.asfreq('S', 'S'), ival_Q_to_S_start)
self.assertEqual(ival_Q.asfreq('S', 'E'), ival_Q_to_S_end)
self.assertEqual(ival_QEJAN.asfreq('D', 'S'), ival_QEJAN_to_D_start)
self.assertEqual(ival_QEJAN.asfreq('D', 'E'), ival_QEJAN_to_D_end)
self.assertEqual(ival_QEJUN.asfreq('D', 'S'), ival_QEJUN_to_D_start)
self.assertEqual(ival_QEJUN.asfreq('D', 'E'), ival_QEJUN_to_D_end)
self.assertEqual(ival_Q.asfreq('Q'), ival_Q)
def test_conv_monthly(self):
# frequency conversion tests: from Monthly Frequency
ival_M = Period(freq='M', year=2007, month=1)
ival_M_end_of_year = Period(freq='M', year=2007, month=12)
ival_M_end_of_quarter = Period(freq='M', year=2007, month=3)
ival_M_to_A = Period(freq='A', year=2007)
ival_M_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_M_to_W_start = Period(freq='W', year=2007, month=1, day=1)
ival_M_to_W_end = Period(freq='W', year=2007, month=1, day=31)
ival_M_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_M_to_B_end = Period(freq='B', year=2007, month=1, day=31)
ival_M_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_M_to_D_end = Period(freq='D', year=2007, month=1, day=31)
ival_M_to_H_start = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_M_to_H_end = Period(freq='H', year=2007, month=1, day=31, hour=23)
ival_M_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_M_to_T_end = Period(freq='Min', year=2007, month=1, day=31,
hour=23, minute=59)
ival_M_to_S_start = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=0, second=0)
ival_M_to_S_end = Period(freq='S', year=2007, month=1, day=31, hour=23,
minute=59, second=59)
self.assertEqual(ival_M.asfreq('A'), ival_M_to_A)
self.assertEqual(ival_M_end_of_year.asfreq('A'), ival_M_to_A)
self.assertEqual(ival_M.asfreq('Q'), ival_M_to_Q)
self.assertEqual(ival_M_end_of_quarter.asfreq('Q'), ival_M_to_Q)
self.assertEqual(ival_M.asfreq('W', 'S'), ival_M_to_W_start)
self.assertEqual(ival_M.asfreq('W', 'E'), ival_M_to_W_end)
self.assertEqual(ival_M.asfreq('B', 'S'), ival_M_to_B_start)
self.assertEqual(ival_M.asfreq('B', 'E'), ival_M_to_B_end)
self.assertEqual(ival_M.asfreq('D', 'S'), ival_M_to_D_start)
self.assertEqual(ival_M.asfreq('D', 'E'), ival_M_to_D_end)
self.assertEqual(ival_M.asfreq('H', 'S'), ival_M_to_H_start)
self.assertEqual(ival_M.asfreq('H', 'E'), ival_M_to_H_end)
self.assertEqual(ival_M.asfreq('Min', 'S'), ival_M_to_T_start)
self.assertEqual(ival_M.asfreq('Min', 'E'), ival_M_to_T_end)
self.assertEqual(ival_M.asfreq('S', 'S'), ival_M_to_S_start)
self.assertEqual(ival_M.asfreq('S', 'E'), ival_M_to_S_end)
self.assertEqual(ival_M.asfreq('M'), ival_M)
def test_conv_weekly(self):
# frequency conversion tests: from Weekly Frequency
ival_W = Period(freq='W', year=2007, month=1, day=1)
ival_WSUN = Period(freq='W', year=2007, month=1, day=7)
ival_WSAT = Period(freq='W-SAT', year=2007, month=1, day=6)
ival_WFRI = Period(freq='W-FRI', year=2007, month=1, day=5)
ival_WTHU = Period(freq='W-THU', year=2007, month=1, day=4)
ival_WWED = Period(freq='W-WED', year=2007, month=1, day=3)
ival_WTUE = Period(freq='W-TUE', year=2007, month=1, day=2)
ival_WMON = Period(freq='W-MON', year=2007, month=1, day=1)
ival_WSUN_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_WSUN_to_D_end = Period(freq='D', year=2007, month=1, day=7)
ival_WSAT_to_D_start = Period(freq='D', year=2006, month=12, day=31)
ival_WSAT_to_D_end = Period(freq='D', year=2007, month=1, day=6)
ival_WFRI_to_D_start = Period(freq='D', year=2006, month=12, day=30)
ival_WFRI_to_D_end = Period(freq='D', year=2007, month=1, day=5)
ival_WTHU_to_D_start = Period(freq='D', year=2006, month=12, day=29)
ival_WTHU_to_D_end = Period(freq='D', year=2007, month=1, day=4)
ival_WWED_to_D_start = Period(freq='D', year=2006, month=12, day=28)
ival_WWED_to_D_end = Period(freq='D', year=2007, month=1, day=3)
ival_WTUE_to_D_start = Period(freq='D', year=2006, month=12, day=27)
ival_WTUE_to_D_end = Period(freq='D', year=2007, month=1, day=2)
ival_WMON_to_D_start = Period(freq='D', year=2006, month=12, day=26)
ival_WMON_to_D_end = Period(freq='D', year=2007, month=1, day=1)
ival_W_end_of_year = Period(freq='W', year=2007, month=12, day=31)
ival_W_end_of_quarter = Period(freq='W', year=2007, month=3, day=31)
ival_W_end_of_month = Period(freq='W', year=2007, month=1, day=31)
ival_W_to_A = Period(freq='A', year=2007)
ival_W_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_W_to_M = Period(freq='M', year=2007, month=1)
if Period(freq='D', year=2007, month=12, day=31).weekday == 6:
ival_W_to_A_end_of_year = Period(freq='A', year=2007)
else:
ival_W_to_A_end_of_year = Period(freq='A', year=2008)
if Period(freq='D', year=2007, month=3, day=31).weekday == 6:
ival_W_to_Q_end_of_quarter = Period(freq='Q', year=2007, quarter=1)
else:
ival_W_to_Q_end_of_quarter = Period(freq='Q', year=2007, quarter=2)
if Period(freq='D', year=2007, month=1, day=31).weekday == 6:
ival_W_to_M_end_of_month = Period(freq='M', year=2007, month=1)
else:
ival_W_to_M_end_of_month = Period(freq='M', year=2007, month=2)
ival_W_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_W_to_B_end = Period(freq='B', year=2007, month=1, day=5)
ival_W_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_W_to_D_end = Period(freq='D', year=2007, month=1, day=7)
ival_W_to_H_start = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_W_to_H_end = Period(freq='H', year=2007, month=1, day=7, hour=23)
ival_W_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_W_to_T_end = Period(freq='Min', year=2007, month=1, day=7,
hour=23, minute=59)
ival_W_to_S_start = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=0, second=0)
ival_W_to_S_end = Period(freq='S', year=2007, month=1, day=7, hour=23,
minute=59, second=59)
self.assertEqual(ival_W.asfreq('A'), ival_W_to_A)
self.assertEqual(ival_W_end_of_year.asfreq('A'),
ival_W_to_A_end_of_year)
self.assertEqual(ival_W.asfreq('Q'), ival_W_to_Q)
self.assertEqual(ival_W_end_of_quarter.asfreq('Q'),
ival_W_to_Q_end_of_quarter)
self.assertEqual(ival_W.asfreq('M'), ival_W_to_M)
self.assertEqual(ival_W_end_of_month.asfreq('M'),
ival_W_to_M_end_of_month)
self.assertEqual(ival_W.asfreq('B', 'S'), ival_W_to_B_start)
self.assertEqual(ival_W.asfreq('B', 'E'), ival_W_to_B_end)
self.assertEqual(ival_W.asfreq('D', 'S'), ival_W_to_D_start)
self.assertEqual(ival_W.asfreq('D', 'E'), ival_W_to_D_end)
self.assertEqual(ival_WSUN.asfreq('D', 'S'), ival_WSUN_to_D_start)
self.assertEqual(ival_WSUN.asfreq('D', 'E'), ival_WSUN_to_D_end)
self.assertEqual(ival_WSAT.asfreq('D', 'S'), ival_WSAT_to_D_start)
self.assertEqual(ival_WSAT.asfreq('D', 'E'), ival_WSAT_to_D_end)
self.assertEqual(ival_WFRI.asfreq('D', 'S'), ival_WFRI_to_D_start)
self.assertEqual(ival_WFRI.asfreq('D', 'E'), ival_WFRI_to_D_end)
self.assertEqual(ival_WTHU.asfreq('D', 'S'), ival_WTHU_to_D_start)
self.assertEqual(ival_WTHU.asfreq('D', 'E'), ival_WTHU_to_D_end)
self.assertEqual(ival_WWED.asfreq('D', 'S'), ival_WWED_to_D_start)
self.assertEqual(ival_WWED.asfreq('D', 'E'), ival_WWED_to_D_end)
self.assertEqual(ival_WTUE.asfreq('D', 'S'), ival_WTUE_to_D_start)
self.assertEqual(ival_WTUE.asfreq('D', 'E'), ival_WTUE_to_D_end)
self.assertEqual(ival_WMON.asfreq('D', 'S'), ival_WMON_to_D_start)
self.assertEqual(ival_WMON.asfreq('D', 'E'), ival_WMON_to_D_end)
self.assertEqual(ival_W.asfreq('H', 'S'), ival_W_to_H_start)
self.assertEqual(ival_W.asfreq('H', 'E'), ival_W_to_H_end)
self.assertEqual(ival_W.asfreq('Min', 'S'), ival_W_to_T_start)
self.assertEqual(ival_W.asfreq('Min', 'E'), ival_W_to_T_end)
self.assertEqual(ival_W.asfreq('S', 'S'), ival_W_to_S_start)
self.assertEqual(ival_W.asfreq('S', 'E'), ival_W_to_S_end)
self.assertEqual(ival_W.asfreq('W'), ival_W)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
ival_W.asfreq('WK')
def test_conv_weekly_legacy(self):
# frequency conversion tests: from Weekly Frequency
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
Period(freq='WK', year=2007, month=1, day=1)
with self.assertRaisesRegexp(ValueError, msg):
Period(freq='WK-SAT', year=2007, month=1, day=6)
with self.assertRaisesRegexp(ValueError, msg):
Period(freq='WK-FRI', year=2007, month=1, day=5)
with self.assertRaisesRegexp(ValueError, msg):
Period(freq='WK-THU', year=2007, month=1, day=4)
with self.assertRaisesRegexp(ValueError, msg):
Period(freq='WK-WED', year=2007, month=1, day=3)
with self.assertRaisesRegexp(ValueError, msg):
Period(freq='WK-TUE', year=2007, month=1, day=2)
with self.assertRaisesRegexp(ValueError, msg):
Period(freq='WK-MON', year=2007, month=1, day=1)
def test_conv_business(self):
        # frequency conversion tests: from Business Frequency
ival_B = Period(freq='B', year=2007, month=1, day=1)
ival_B_end_of_year = Period(freq='B', year=2007, month=12, day=31)
ival_B_end_of_quarter = Period(freq='B', year=2007, month=3, day=30)
ival_B_end_of_month = Period(freq='B', year=2007, month=1, day=31)
ival_B_end_of_week = Period(freq='B', year=2007, month=1, day=5)
ival_B_to_A = Period(freq='A', year=2007)
ival_B_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_B_to_M = Period(freq='M', year=2007, month=1)
ival_B_to_W = Period(freq='W', year=2007, month=1, day=7)
ival_B_to_D = Period(freq='D', year=2007, month=1, day=1)
ival_B_to_H_start = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_B_to_H_end = Period(freq='H', year=2007, month=1, day=1, hour=23)
ival_B_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_B_to_T_end = Period(freq='Min', year=2007, month=1, day=1,
hour=23, minute=59)
ival_B_to_S_start = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=0, second=0)
ival_B_to_S_end = Period(freq='S', year=2007, month=1, day=1, hour=23,
minute=59, second=59)
self.assertEqual(ival_B.asfreq('A'), ival_B_to_A)
self.assertEqual(ival_B_end_of_year.asfreq('A'), ival_B_to_A)
self.assertEqual(ival_B.asfreq('Q'), ival_B_to_Q)
self.assertEqual(ival_B_end_of_quarter.asfreq('Q'), ival_B_to_Q)
self.assertEqual(ival_B.asfreq('M'), ival_B_to_M)
self.assertEqual(ival_B_end_of_month.asfreq('M'), ival_B_to_M)
self.assertEqual(ival_B.asfreq('W'), ival_B_to_W)
self.assertEqual(ival_B_end_of_week.asfreq('W'), ival_B_to_W)
self.assertEqual(ival_B.asfreq('D'), ival_B_to_D)
self.assertEqual(ival_B.asfreq('H', 'S'), ival_B_to_H_start)
self.assertEqual(ival_B.asfreq('H', 'E'), ival_B_to_H_end)
self.assertEqual(ival_B.asfreq('Min', 'S'), ival_B_to_T_start)
self.assertEqual(ival_B.asfreq('Min', 'E'), ival_B_to_T_end)
self.assertEqual(ival_B.asfreq('S', 'S'), ival_B_to_S_start)
self.assertEqual(ival_B.asfreq('S', 'E'), ival_B_to_S_end)
self.assertEqual(ival_B.asfreq('B'), ival_B)
def test_conv_daily(self):
        # frequency conversion tests: from Daily Frequency
ival_D = Period(freq='D', year=2007, month=1, day=1)
ival_D_end_of_year = Period(freq='D', year=2007, month=12, day=31)
ival_D_end_of_quarter = Period(freq='D', year=2007, month=3, day=31)
ival_D_end_of_month = Period(freq='D', year=2007, month=1, day=31)
ival_D_end_of_week = Period(freq='D', year=2007, month=1, day=7)
ival_D_friday = Period(freq='D', year=2007, month=1, day=5)
ival_D_saturday = Period(freq='D', year=2007, month=1, day=6)
ival_D_sunday = Period(freq='D', year=2007, month=1, day=7)
# TODO: unused?
# ival_D_monday = Period(freq='D', year=2007, month=1, day=8)
ival_B_friday = Period(freq='B', year=2007, month=1, day=5)
ival_B_monday = Period(freq='B', year=2007, month=1, day=8)
ival_D_to_A = Period(freq='A', year=2007)
ival_Deoq_to_AJAN = Period(freq='A-JAN', year=2008)
ival_Deoq_to_AJUN = Period(freq='A-JUN', year=2007)
ival_Deoq_to_ADEC = Period(freq='A-DEC', year=2007)
ival_D_to_QEJAN = Period(freq="Q-JAN", year=2007, quarter=4)
ival_D_to_QEJUN = | Period(freq="Q-JUN", year=2007, quarter=3) | pandas.Period |
# %%
import colorcet as cc
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import pandas as pd
import seaborn as sns
from adjustText import adjust_text
from graspologic.embed import AdjacencySpectralEmbed, LaplacianSpectralEmbed
from graspologic.utils import import_graph, pass_to_ranks
from matplotlib.collections import LineCollection
from scipy.spatial import ConvexHull
from sklearn.metrics import pairwise_distances
from sklearn.preprocessing import normalize
from umap import UMAP
def graphplot(
network=None,
embedding=None,
meta=None,
transform="pass_to_ranks",
embedding_algorithm="ase",
n_components=32,
n_neighbors=32,
min_dist=0.8,
metric="cosine",
hue=None,
group="hue",
group_convex_hull=False,
size="degree",
node_palette=None,
ax=None,
figsize=(10, 10),
sizes=(10, 30),
legend=False,
edge_hue="pre",
edge_palette=None,
edge_linewidth=0.2,
edge_alpha=0.2,
spines=False,
subsample_edges=False,
verbose=False,
random_state=None,
network_order=1,
normalize_power=False,
supervised_weight=False,
hue_labels=False,
hue_label_fontsize=None,
adjust_labels=False,
return_results=False,
tile=False,
tile_layout=None,
embed_kws={},
umap_kws={},
scatterplot_kws={},
):
results = {}
networkx = False
adj = import_graph(network).copy() # TODO allow for CSR
if random_state is None:
random_state = np.random.default_rng()
elif isinstance(random_state, (int, np.integer)):
random_state = np.random.default_rng(random_state)
if transform == "pass_to_ranks":
adj = pass_to_ranks(adj)
if embedding is None:
# if we are given a graph, do an initial embedding
if verbose > 0:
print("Performing initial spectral embedding of the network...")
if embedding_algorithm == "ase":
embedder = AdjacencySpectralEmbed(
n_components=n_components, concat=True, **embed_kws
)
elif embedding_algorithm == "lse":
embedder = LaplacianSpectralEmbed(
form="R-DAD", n_components=n_components, concat=True, **embed_kws
)
if network_order == 2:
# TODO not sure how much this makes sense in practice, just something I've
# been playing around with
if normalize_power:
adj_normed = normalize(adj, axis=1)
embedding = embedder.fit_transform(adj_normed @ adj_normed)
else:
embedding = embedder.fit_transform(adj @ adj)
elif network_order == 1:
embedding = embedder.fit_transform(adj)
results["embedding"] = embedding
# if input is networkx, extract node metadata into a data frame
if isinstance(network, (nx.Graph, nx.DiGraph)):
networkx = True
index = list(sorted(network.nodes()))
meta = pd.DataFrame(index=index)
for attr in [hue, size]:
if attr is not None:
attr_map = nx.get_node_attributes(network, attr)
meta[attr] = meta.index.map(attr_map)
elif meta is None:
meta = pd.DataFrame(index=range(network.shape[0]))
index = meta.index
if embedding.shape[1] > 2:
if verbose > 0:
print("Performing UMAP embedding...")
# once we have the initial embedding, embed again down to 2D using UMAP
umapper = UMAP(
n_components=2,
n_neighbors=n_neighbors,
min_dist=min_dist,
metric=metric,
random_state=random_state.integers(np.iinfo(np.int32).max),
target_weight=supervised_weight,
**umap_kws,
)
if supervised_weight > 0:
if group == "hue":
group = hue
y = meta[group].values
_, y = np.unique(y, return_inverse=True)
else:
y = None
umap_embedding = umapper.fit_transform(embedding, y=y)
results["umap_embedding"] = umap_embedding
else:
umap_embedding = embedding
# TODO
mids = (umap_embedding.max(axis=0) + umap_embedding.min(axis=0)) / 2
umap_embedding -= mids
max_length = np.linalg.norm(umap_embedding, axis=1).max()
umap_embedding /= max_length
# add the UMAP embedding into the dataframe for plotting
columns = [f"umap_{i}" for i in range(umap_embedding.shape[1])]
plot_df = | pd.DataFrame(data=umap_embedding, columns=columns, index=meta.index) | pandas.DataFrame |
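# --- Illustrative aside (standalone sketch, not part of the module above):
# the same pattern of wrapping a 2-D embedding array in a DataFrame with
# generated column names and a shared index; array and index values are
# assumptions for illustration.
import numpy as np
import pandas as pd

emb = np.random.default_rng(0).normal(size=(5, 2))            # stand-in for umap_embedding
idx = pd.Index([f"node_{i}" for i in range(5)], name="node")  # stand-in for meta.index
cols = [f"umap_{i}" for i in range(emb.shape[1])]
plot_df_demo = pd.DataFrame(data=emb, columns=cols, index=idx)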
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import motor
import pygame
import sys
import ctypes
import signal
import os
import subprocess
import random
import time
import math
import cv2
import re
import logging
#BASIC PYGAME
pygame.init()
pygame.font.init()
clock = pygame.time.Clock()
screen_width = 1300
screen_height = 1000
screen = pygame.display.set_mode([screen_width, screen_height])
block_list = pygame.sprite.Group()
all_sprites_list = pygame.sprite.Group()
button_list = pygame.sprite.Group()
label_list = pygame.sprite.Group()
#CLASSES
class Block(pygame.sprite.Sprite):
def __init__(self, color, width, height,x_pos = None, y_pos = None, text = None,value = None, font_type = 'ubuntumono', font_size = 20, text_color = [0,0,0], text_x = 5, text_y = 5, direction = 0, index_x = None, index_y = None):
super().__init__()
self.size = [width,height]
self.image = pygame.Surface(self.size)
self.image.fill(color)
self.color = color
self.x = x_pos
self.y = y_pos
self.text_x = text_x
self.text_y = text_y
self.text = text
self.font_type = font_type
self.font_size = font_size
self.text_color = text_color
self.rect = self.image.get_rect()
self.font = pygame.font.SysFont(self.font_type, self.font_size)
self.textsurface = self.font.render(self.text, False, self.text_color)
self.image.blit(self.textsurface,(self.text_x, self.text_y))
self.direction = direction
self.index_x = index_x
self.index_y = index_y
self.value = value
def update(self, txt):
self.text = txt
self.image.fill(self.color)
self.textsurface = self.font.render(self.text, False, self.text_color)
self.image.blit(self.textsurface,(self.text_x, self.text_y))
def change_text(self, data_pos):
        data_pos = data_pos + self.direction
return data_pos
# CAMERA PARAMETERS FOR LIVEVIEW
gp = ctypes.CDLL('/usr/local/lib/libgphoto2.so.6.0.0')
GP_OK = fileNbr = 0
GP_VERSION_VERBOSE = 1
cam = ctypes.c_void_p()
gp.gp_camera_new(ctypes.byref(cam))
ctx = gp.gp_context_new()
fil = ctypes.c_void_p()
gp.gp_file_new(ctypes.byref(fil))
old_tick = round(time.time())
#POSITIONING PARAMETERS
barrier = 724
b1 = 510
b2 = 545
b3 = 580
b4 = 615
b5 = 650
b6 = 685
b7 = 720
b8 = 755
b9 = 790
done = False
grid_x = 0
grid_y = 0
exposures = []
z_low = None
z_high = None
column = 200
# CREATING SURFACES
nav = pygame.surface.Surface((400, 400))
nav.fill((255,255,255))
camera_width = 70
camera_height = 50
camera = pygame.surface.Surface((camera_width,camera_height))
camera.fill((0,0,255))
camera.set_alpha(80)
nav_border = pygame.surface.Surface((505, 450))
nav_border.fill((155,155,155))
h_scale = pygame.surface.Surface((30, 400))
h_scale.fill((255,255,255))
camera_level = pygame.surface.Surface((30, 5))
camera_level.fill((30,30,255))
camera_low = pygame.surface.Surface((30, 20))
camera_low.fill([255,150,150])
camera_high = pygame.surface.Surface((30, 20))
camera_high.fill([255,255,140])
btx = 7
bty = 2
color1 = [255,255,255]
color2 = [100,100,100]
#navigator coordinates
nav_x_count = Block((155,155,155),125,25, text = 'X',text_color = [255,255,255])
nav_y_count = Block((155,155,155),125,25, text = 'Y',text_color = [255,255,255])
nav_z_count = Block((155,155,155),125,25, text = 'Z',text_color = [255,255,255])
#STARTING SCREEN
button_start = Block((0,0,0),1000,50, text = ' PLEASE PRESS ALL CALIBRATION BUTTONS AS A SECURITY CHECK ',text_color = [255,255,255],font_size = 40)
x_button_start = Block((255,255,0),250,100, text = 'X BUTTON', text_color = [0,0,0],text_x = 30,text_y = 30,font_size = 50)
y_button_start = Block((255,255,0),250,100, text = 'Y BUTTON', text_color = [0,0,0],text_x = 30,text_y = 30,font_size = 50)
#cols button
grid_cols_list = range(1,25)
grid_cols_index = 3
grid_cols_disp_text = "Cols: " + str(grid_cols_list[grid_cols_index])
grid_cols_disp = Block(color1,175,25, text = grid_cols_disp_text,text_color = color2,text_x = btx, text_y = bty)
grid_cols_up = Block(color1,25,25, text = ">",text_color = color2,text_x = btx, text_y = bty, direction = 1)
grid_cols_down = Block(color1,25,25, text = "<",text_color = color2,text_x = btx, text_y = bty, direction = -1)
grid_cols_up.rect.x = barrier + 220
grid_cols_up.rect.y = b1
grid_cols_down.rect.x = barrier + 185
grid_cols_down.rect.y = b1
button_list.add(grid_cols_down)
button_list.add(grid_cols_up)
#rows button
grid_rows_list = range(1,25)
grid_rows_index = 3
grid_rows_disp_text = "Rows: " +str(grid_rows_list[grid_rows_index])
grid_rows_disp = Block(color1,175,25, text = grid_rows_disp_text,text_color = color2,text_x = btx, text_y = bty)
grid_rows_up = Block(color1,25,25, text = ">",text_color = color2,text_x = btx, text_y = bty,direction = 1)
grid_rows_down = Block(color1,25,25, text = "<",text_color = color2,text_x = btx, text_y = bty,direction = -1)
grid_rows_up.rect.x = barrier + 220
grid_rows_up.rect.y = b2
grid_rows_down.rect.x = barrier + 185
grid_rows_down.rect.y = b2
button_list.add(grid_rows_down)
button_list.add(grid_rows_up)
#shutter button
shutter_list = ["0.0002s","0.0003s","0.0004s","0.0005s","0.0006s","0.0008s","0.0010s",
"0.0012s","0.0015s","0.0020s","0.0025s","0.0031s","0.0040s","0.0050s",
"0.0062s","0.0080s","0.0100s","0.0125s","0.0166s","0.0200s","0.0250s",
"0.0333s","0.0400s","0.0500s","0.0666s","0.0769s","0.1000s","0.1250s",
"0.1666s","0.2000s","0.2500s","0.3333s","0.4000s","0.5000s","0.6250s",
"0.7692s","1.0000s","1.3000s","1.6000s","2.0000s","2.5000s","3.0000s",
"4.0000s","5.0000s","6.0000s","8.0000s","10.0000s","13.0000s","15.0000s",
"20.0000s","25.0000s","30.0000s"]
shutter_index = 15
shutter_disp_text = "Shutter: " + str(shutter_list[shutter_index])
shutter_disp = Block((200,255,255),175,25, text = shutter_disp_text,text_color = color2,text_x = btx, text_y = bty)
shutter_up = Block(color1,25,25, text = ">",text_color = color2,text_x = btx, text_y = bty,direction = 1)
shutter_down = Block(color1,25,25, text = "<",text_color = color2,text_x = btx, text_y = bty,direction = -1)
shutter_up.rect.x = barrier + 220 + 260
shutter_up.rect.y = b2
shutter_down.rect.x = barrier + 185 + 260
shutter_down.rect.y = b2
button_list.add(shutter_down)
button_list.add(shutter_up)
#iso button
iso_list = ["100","200","300","400","800","1600","3200","6400","12800"]
iso_index = 0
iso_disp_text = "Iso: " + str(iso_list[iso_index])
iso_disp = Block((200,255,255),175,25, text = iso_disp_text,text_color = color2,text_x = btx, text_y = bty)
iso_up = Block(color1,25,25, text = ">",text_color = color2,text_x = btx, text_y = bty,direction = 1)
iso_down = Block(color1,25,25, text = "<",text_color = color2,text_x = btx, text_y = bty,direction = -1)
iso_up.rect.x = barrier + 220 + 260
iso_up.rect.y = b1
iso_down.rect.x = barrier + 185 + 260
iso_down.rect.y = b1
button_list.add(iso_down)
button_list.add(iso_up)
#zstep button
z_step_list = range(1,30)
z_step_index = 2
z_step_text = "Z Step: " + str(z_step_list[z_step_index])
z_step_disp = Block(color1,175,25, text = z_step_text,text_color = color2,text_x = btx, text_y = bty)
z_step_up = Block(color1,25,25, text = ">",text_color = color2,text_x = btx, text_y = bty,direction = 1)
z_step_down = Block(color1,25,25, text = "<",text_color = color2,text_x = btx, text_y = bty,direction = -1)
z_step_up.rect.x = barrier + 220
z_step_up.rect.y = b5
z_step_down.rect.x = barrier + 185
z_step_down.rect.y = b5
button_list.add(z_step_down)
button_list.add(z_step_up)
layers = math.ceil(column/z_step_list[z_step_index])
#x_step button
x_step_list = range(70,95,2)
x_step_index = 8
x_step_text = "Overlap: " + str(x_step_list[x_step_index])
x_step_disp = Block(color1,175,25, text = x_step_text,text_color = color2,text_x = btx, text_y = bty)
x_step_up = Block(color1,25,25, text = ">",text_color = color2,text_x = btx, text_y = bty,direction = 1)
x_step_down = Block(color1,25,25, text = "<",text_color = color2,text_x = btx, text_y = bty,direction = -1)
x_step_up.rect.x = barrier + 220
x_step_up.rect.y = b3
x_step_down.rect.x = barrier + 185
x_step_down.rect.y = b3
button_list.add(x_step_down)
button_list.add(x_step_up)
#y_step button ----- CHANGED TO CAMERA SIZE
y_step_list = range(12,180,2)
y_step_index = 9
y_step_text = "CAM: " + str(y_step_list[y_step_index])
y_step_disp = Block(color1,175,25, text = y_step_text,text_color = color2,text_x = btx, text_y = bty)
y_step_up = Block(color1,25,25, text = ">",text_color = color2,text_x = btx, text_y = bty,direction = 1)
y_step_down = Block(color1,25,25, text = "<",text_color = color2,text_x = btx, text_y = bty,direction = -1)
y_step_up.rect.x = barrier + 220
y_step_up.rect.y = b4
y_step_down.rect.x = barrier + 185
y_step_down.rect.y = b4
button_list.add(y_step_down)
button_list.add(y_step_up)
#Image quality button
img_list = [["JPEG Basic","3008x2000",0.2],["JPEG Basic","4512x3000",0.4],["JPEG Basic","6016x4000",0.6],\
["JPEG Normal","3008x2000",0.6],["JPEG Normal","4512x3000",0.8],["JPEG Normal","6016x4000",1],\
["JPEG Fine","3008x2000",1.2],["JPEG Fine","4512x3000",1.4],["JPEG Fine","6016x4000",1.6],\
["NEF (Raw)","6016x4000",2]]
img_index = 9
img_text = "Image Q: " + str(img_list[img_index][0] + img_list[img_index][1])
img_disp = Block(color1,175,25, text = img_text,text_color = color2,text_x = btx, text_y = bty,font_size = 14)
img_up = Block(color1,25,25, text = ">",text_color = color2,text_x = btx, text_y = bty,direction = 1)
img_down = Block(color1,25,25, text = "<",text_color = color2,text_x = btx, text_y = bty,direction = -1)
img_up.rect.x = barrier + 220
img_up.rect.y = b6
img_down.rect.x = barrier + 185
img_down.rect.y = b6
button_list.add(img_down)
button_list.add(img_up)
#canny parameter 1
canny1_list = range(5,200,5)
canny1_index = 8
canny1_text = "Canny1: " + str(canny1_list[canny1_index])
canny1_disp = Block(color1,175,25, text = canny1_text,text_color = color2,text_x = btx, text_y = bty)
canny1_up = Block(color1,25,25, text = ">",text_color = color2,text_x = btx, text_y = bty,direction = 1)
canny1_down = Block(color1,25,25, text = "<",text_color = color2,text_x = btx, text_y = bty,direction = -1)
canny1_up.rect.x = barrier + 220
canny1_up.rect.y = b7
canny1_down.rect.x = barrier + 185
canny1_down.rect.y = b7
button_list.add(canny1_down)
button_list.add(canny1_up)
#canny parameter 2
canny2_list = range(5,200,5)
canny2_index = 18
canny2_text = "Canny2: " + str(canny2_list[canny2_index])
canny2_disp = Block(color1,175,25, text = canny2_text,text_color = color2,text_x = btx, text_y = bty)
canny2_up = Block(color1,25,25, text = ">",text_color = color2,text_x = btx, text_y = bty,direction = 1)
canny2_down = Block(color1,25,25, text = "<",text_color = color2,text_x = btx, text_y = bty,direction = -1)
canny2_up.rect.x = barrier + 220
canny2_up.rect.y = b8
canny2_down.rect.x = barrier + 185
canny2_down.rect.y = b8
button_list.add(canny2_down)
button_list.add(canny2_up)
#threshold button
threshold_list = range(10)
threshold_index = 1
threshold_text = "Threshold: " + str(threshold_list[threshold_index])
threshold_disp = Block(color1,175,25, text = threshold_text,text_color = color2,text_x = btx, text_y = bty)
threshold_up = Block(color1,25,25, text = ">",text_color = color2,text_x = btx, text_y = bty,direction = 1)
threshold_down = Block(color1,25,25, text = "<",text_color = color2,text_x = btx, text_y = bty,direction = -1)
threshold_up.rect.x = barrier + 220
threshold_up.rect.y = b9
threshold_down.rect.x = barrier + 185
threshold_down.rect.y = b9
button_list.add(threshold_down)
button_list.add(threshold_up)
#set Grid button
grid_set = Block([200,200,200],245,25, text = " SET GRID",text_color = color2,text_x = btx, text_y = bty)
grid_set.rect.x = barrier + 260
grid_set.rect.y = b6
button_list.add(grid_set)
#add button
add_button = Block([130,255,255],245,25, text = " ADD CAMERA FRAME",text_color = color2,text_x = btx, text_y = bty)
add_button.rect.x = barrier + 260
add_button.rect.y = b3
button_list.add(add_button)
#set lowest focus button
lowest_focus = Block([255,150,150],245,25, text = " SET LOWEST FOCUS",text_color = color2,text_x = btx, text_y = bty)
lowest_focus.rect.x = barrier + 260
lowest_focus.rect.y = b8
button_list.add(lowest_focus)
#add highest
highest_focus = Block([255,255,140],245,25, text = " SET HIGHEST FOCUS",text_color = color2,text_x = btx, text_y = bty)
highest_focus.rect.x = barrier + 260
highest_focus.rect.y = b7
button_list.add(highest_focus)
#EXPOSURE CLEAR
exposure_clear = Block([255,255,255],245,25, text = " CLEAR EXPOSURES",text_color = color2,text_x = btx, text_y = bty)
exposure_clear.rect.x = barrier + 260
exposure_clear.rect.y = b5
button_list.add(exposure_clear)
#PREVIEW ALL
preview_all = Block([130,255,255],245,25, text = " PREVIEW ALL FRAMES",text_color = color2,text_x = btx, text_y = bty)
preview_all.rect.x = barrier + 260
preview_all.rect.y = b4
button_list.add(preview_all)
#canny button
preview_button = Block([255,255,255],120,100, text = "CANNY",text_color = color2,text_x = 25, text_y = 40)
preview_button.rect.x = 20
preview_button.rect.y = b1
button_list.add(preview_button)
#run button
run_button = Block([100,255,100],120,100, text = "RUN",text_color = color2,text_x = 45, text_y = 40)
run_button.rect.x = 20
run_button.rect.y = b4
button_list.add(run_button)
#orientation button
orientation_button = Block([255,255,255],120,100, text = "ORIENTATION",text_color = color2,text_x = 5, text_y = 40)
orientation_button.rect.x = 150
orientation_button.rect.y = b1
button_list.add(orientation_button)
orientation_list = ["Horizontal","Vertical"]
orientation_index = 1
#reset button
reset_button = Block([255,255,255],120,100, text = "RESET",text_color = color2,text_x = 37, text_y = 40)
reset_button.rect.x = 150
reset_button.rect.y = b4
button_list.add(reset_button)
# run labels
project_name_disp = Block([250,250,250],425,25, text = "PROJECT NAME: ",text_color = color2,text_x = 130, text_y = 3)
text = ""
project_name_disp.rect.x = 280
project_name_disp.rect.y = b1
button_list.add(project_name_disp)
run_label = Block([230,230,230],425,25, text = "RUN SPECIFICATIONS",text_color = color2,text_x = 130, text_y = 3)
label_font = 18
run_label1_text = "COLS:"+str(grid_cols_list[grid_cols_index]) + " ROWS:"+ str(grid_rows_list[grid_rows_index])+" ORIENTATION:" + orientation_list[orientation_index]+ " Expo:" + str(len(exposures)) + " ImageQ:" + str(img_list[img_index])
run_label2_text = "Xstep:" + str(x_step_list[x_step_index]) + " Ystep:" + str(y_step_list[y_step_index]) + " Zstep:" + str(z_step_list[z_step_index]) + " Grid:" + str(grid_x) + "," + str(grid_y) + " Zdepth:" + str(layers)
run_label1 = Block([230,230,230],425,25, text = run_label1_text,text_color = color2,text_x = 3, text_y = 3,font_size = label_font)
run_label2 = Block([230,230,230],425,25, text = run_label2_text,text_color = color2,text_x = 3, text_y = 3,font_size = label_font)
#---------------- CAMERA FUNCTIONS -------------------
def dslr(command, cycle=0):
camera_process = subprocess.call(command)
cycles = cycle
if camera_process != 0:
if cycles < 10:
cycles +=1
time.sleep(1)
dslr(command,cycles)
else:
try:
                logging.warning("Error after 10 cycles trying to execute Dslr command: %s", command)
except:
print("Error after 10 cycles trying to execute Dslr command: ",command)
def cam_shutter(value):
return ["gphoto2","--set-config", "/main/capturesettings/shutterspeed=" + value]
def cam_iso(value):
return ["gphoto2","--set-config", "/main/imgsettings/iso=" + value]
def cam_img_size(value):
return ["gphoto2","--set-config","/main/other/5003=" + value]
def cam_img_quality(string):
return ["gphoto2","--set-config","/main/capturesettings/imagequality=" + string]
def camera_match(expression):
    camera_files = subprocess.check_output(['gphoto2','--list-files'], universal_newlines=True)
m = re.findall(expression,camera_files)
return m
def newfolder(name):
os.makedirs(name)
os.chdir(name)
def killstart():
p= subprocess.Popen(['ps', '-A'], stdout=subprocess.PIPE)
out, err = p.communicate ()
for line in out.splitlines() :
if b'gvfsd-gphoto2' in line:
pid = int(line.split(None,1) [0])
os.kill(pid, signal.SIGKILL)
def find_delete_folder():
folder = camera_match(r"/store_00010001/DCIM/1.......")
return ["gphoto2","--folder",folder, "-R", "--delete-all-files"]
clear = ["gphoto2","--folder", "/store_00010001/DCIM/100D3200", "-R", "--delete-all-files"]
trigger = ["gphoto2","--trigger-capture"]
download = ["gphoto2","--get-all-files"]
memorycard = ["gphoto2","--set-config", "capturetarget=1"]
def files_num(i):
order = ["gphoto2","--folder"]
directory = "/store_00010001/DCIM/10{}D3200".format(i)
ext = ["-R", "--num-files"]
order.append(directory)
order.extend(ext)
pros = subprocess.check_output(order)
numberr = int(pros[59:])
return numberr
def check(rtn):
if rtn != GP_OK:
gp.gp_result_as_string.restype = ctypes.c_char_p
print('!! ERROR(%s) %s' % (rtn,gp.gp_result_as_string(rtn)))
sys.exit(0)
# CREATING GRAPHICAL REPRESENTATION OF A GRID
def grid_create(p_x, p_y,x_motor,y_motor,frame_width,frame_height,cols,rows):
grid_color=[225,225,255]
overlap_color = [210,210,255]
overlap_x = frame_width - x_motor
overlap_y = frame_height - y_motor
pos_y = p_y
for i in range(rows):
pos_x = p_x
for l in range(cols):
if l == 0 or l == (cols-1):
draw_width = frame_width - overlap_x
else:
draw_width = frame_width - (2*overlap_x)
if i == 0 or i == (rows - 1):
draw_height = frame_height - overlap_y
else:
draw_height = frame_height - (2*overlap_y)
if l != 0 and i != 0:
block = Block(grid_color,draw_width,draw_height,(pos_x - overlap_x),(pos_y - overlap_y),index_x = l,index_y = i)
elif l != 0:
block = Block(grid_color,draw_width,draw_height,(pos_x - overlap_x),pos_y,index_x = l,index_y = i)
elif i != 0:
block = Block(grid_color,draw_width,draw_height,pos_x,(pos_y - overlap_y),index_x = l,index_y = i)
else:
block = Block(grid_color,draw_width,draw_height,pos_x,pos_y,index_x = l,index_y = i)
block.rect.x = pos_x
block.rect.y = pos_y
block_list.add(block)
all_sprites_list.add(block)
if i != (rows-1):
over = Block(overlap_color,draw_width,overlap_y)
over.rect.x = pos_x
over.rect.y = pos_y + draw_height
all_sprites_list.add(over)
pos_x = pos_x + draw_width
if l != (cols-1):
if i != (rows-1):
over = Block(overlap_color,overlap_x,(draw_height + overlap_y))
over.rect.x = pos_x
over.rect.y = pos_y
all_sprites_list.add(over)
pos_x = pos_x + overlap_x
else:
over = Block(overlap_color,overlap_x,draw_height)
over.rect.x = pos_x
over.rect.y = pos_y
all_sprites_list.add(over)
pos_x = pos_x + overlap_x
pos_y = pos_y + draw_height + overlap_y
def calibrate_xy():
x = motor.button_x()
y = motor.button_y()
calibration_count_x = 0
calibration_count_y = 0
print("Starting Calibration")
    while (not x or not y) and calibration_count_x < 8500 and calibration_count_y < 8500:
x = motor.button_y()
y = motor.button_x()
if not x and not y:
motor.small_bottom_left(5)
calibration_count_x = calibration_count_x + 5
calibration_count_y = calibration_count_y + 5
elif not x:
motor.small_left(5)
calibration_count_x = calibration_count_x + 5
elif not y:
motor.small_down(5)
calibration_count_y = calibration_count_y + 5
print("Calibration Ended")
calibration = (calibration_count_x,calibration_count_y)
return calibration
done = False
click_pos = [None,None]
go_to = False
killstart()
time.sleep(0.5)
check(gp.gp_camera_init(cam, ctx))
print('** camera connected')
camera_width = (y_step_list[y_step_index])
camera_height = round((camera_width*2)/3)
pygame.display.set_caption('Macro Viewer')
#---------BUTTON SECURITY CHECK ------------
screen.blit(button_start.image,(160,300))
screen.blit(x_button_start.image,(700,500))
screen.blit(y_button_start.image,(300,500))
pygame.display.update()
check_x = False
check_y = False
while not check_x or not check_y:
x = motor.button_y()
y = motor.button_x()
if x:
check_x = True
x_button_start.image.fill([0,255,0])
screen.blit(x_button_start.image,(700,500))
if y:
check_y = True
y_button_start.image.fill([0,255,0])
screen.blit(y_button_start.image,(300,500))
pygame.display.update()
button_start.update("CALIBRATING...")
screen.fill([0,0,0])
screen.blit(button_start.image,(500,300))
screen.blit(y_button_start.image,(300,500))
screen.blit(x_button_start.image,(700,500))
pygame.display.update()
# -------------------------PARAMETERS----------------------------
buffer_wait = 0.5
download_time = 2
max_cycles = 8
cycles_sleep = 1
pos = [0,0]
nef_mode = True
smotor = 15
cali = calibrate_xy()
print(cali)
count_x = 0
count_y = 0
count_z = None
camera_x = 0
camera_y = 400 - camera_height
z_high = None
z_low = None
z_level = None
change = True
motor_mode = False
run_mode = False
canny_on = False
current_frame = None
grid_x = 50
grid_y = 50
b = [b5,b6,b7,b8,b9]
ce = 0
screen.fill([230,230,230])
lens = "4X"
project_name = ""
cam_bug_index = 0
shot_n = 0
shot_fail = False
############################### Positioning Gui Loop ###########################
while not done:
#------------------------ CAMERA COMMUNICATION ----------------
clock.tick(60) #60fps
try:
check(gp.gp_camera_capture_preview(cam, fil, ctx))
cData = ctypes.c_void_p()
cLen = ctypes.c_ulong()
check(gp.gp_file_get_data_and_size(fil, ctypes.byref(cData), ctypes.byref(cLen)))
img = ctypes.string_at(cData.value, cLen.value)
open('img1.jpg','wb').write(img)
feed = pygame.image.load("img1.jpg")
except:
time.sleep(2)
check(gp.gp_camera_exit(cam, ctx))
time.sleep(2)
check(gp.gp_camera_init(cam, ctx))
time.sleep(2)
check(gp.gp_camera_capture_preview(cam, fil, ctx))
cData = ctypes.c_void_p()
cLen = ctypes.c_ulong()
check(gp.gp_file_get_data_and_size(fil, ctypes.byref(cData), ctypes.byref(cLen)))
img = ctypes.string_at(cData.value, cLen.value)
open('img1.jpg','wb').write(img)
feed = pygame.image.load("img1.jpg")
if canny_on:
frame = cv2.imread('img1.jpg',0)
canny = cv2.Canny(frame,canny1_list[canny1_index],canny2_list[canny2_index])
hist = cv2.calcHist(canny,[0],None,[2],[0,256])
cv2.imwrite('canny.jpg',canny)
feed = pygame.image.load("canny.jpg")
preview_button.update("CANNY: "+str(int(hist[1])))
#------------------------------ RUN MODE -------------------------------
if run_mode:
if go_to:
pass
else:
eadge = []
os.chdir("/home/pi/green/")
while (count_z - z_step) > 0:
try:
check(gp.gp_camera_capture_preview(cam, fil, ctx))
cData = ctypes.c_void_p()
cLen = ctypes.c_ulong()
check(gp.gp_file_get_data_and_size(fil, ctypes.byref(cData), ctypes.byref(cLen)))
img = ctypes.string_at(cData.value, cLen.value)
open('img1.jpg','wb').write(img)
feed1 = pygame.image.load("img1.jpg")
except:
time.sleep(2)
check(gp.gp_camera_exit(cam, ctx))
time.sleep(2)
check(gp.gp_camera_init(cam, ctx))
time.sleep(2)
check(gp.gp_camera_capture_preview(cam, fil, ctx))
cData = ctypes.c_void_p()
cLen = ctypes.c_ulong()
check(gp.gp_file_get_data_and_size(fil, ctypes.byref(cData), ctypes.byref(cLen)))
img = ctypes.string_at(cData.value, cLen.value)
open('img1.jpg','wb').write(img)
feed1 = pygame.image.load("img1.jpg")
frame = cv2.imread('img1.jpg',0)
screen.blit(feed1, (20,40))
canny = cv2.Canny(frame,canny1_list[canny1_index],canny2_list[canny2_index])
hist = cv2.calcHist(canny,[0],None,[2],[0,256])
cv2.imwrite('canny.jpg',canny)
feed = pygame.image.load("canny.jpg")
screen.blit(feed, (20,b1))
hist = cv2.calcHist(canny,[0],None,[2],[0,256])
eadge.append(int(hist[1]))
motor.focus_down(z_step)
count_z -= z_step
pygame.display.update()
check(gp.gp_camera_exit(cam, ctx))
logging.info(("Starting run x {} | y {}!".format(str(grid_index_x),str(grid_index_y))))
s_count = 0
time.sleep(1)
for i,e in enumerate(eadge[::-1]):
if e > threshold:
s_count +=1
logging.info("SHOT! Position:{}|Canny{}".format(i,e))
for button in label_list:
#Configure this boolean for control flow later
shot_fail = False
#this is the basic photo process
dslr(cam_iso(button.value[1]))
time.sleep(0.3)
dslr(cam_shutter(button.value[0]))
time.sleep(0.3)
dslr(trigger)
time.sleep(float(button.value[0].replace("s",""))*2)
d1 = time.time()
time.sleep(img_list[img_index][2])
#Now we validate if the shot is in the camera memory
                        #We compare the count of shots in the program to the amount found in the folder that stores the images on the camera
shot_n += 1
#every 999 photos the nikon D3200 creates a new directory, this needs a quick fix
cam_bug_index = math.floor(shot_n/999)
#look refers to images found in camera file
look = files_num(cam_bug_index)
#look2 refers to count in the program
look2 = (shot_n%999)
#these lines are just so there is no problem on file 999.
if look2 == 0:
look2 = 999
else:
cam_bug_index = math.floor(shot_n/999)
logging.info("CAM/REG -- {}/{}".format(look,look2))
                        # A cycle count for validating the photo in the camera
check_cycles = 0
d2 = time.time()
dt = d2-d1
                        #validation loop if the file has not been found on the camera yet; if more than max_cycles occur an error is raised
while look != look2:
time.sleep(img_list[img_index][2])
look = files_num(cam_bug_index)
look2 = len(shot_canny)
logging.info("CHECK CICLE {} -- {}/{}".format(check_cycles,look,look2))
check_cycles += 1
d2 = time.time()
dt = d2-d1
if check_cycles > max_cycles:
logging.warning("Faliure in shot x{}|y{}|z{}|s{}|i{}, removing from csv, total = {}".format(grid_index_x,grid_index_y,i,button.value[0],button.value[1],total_err))
total_err +=1
shot_n -=1
shot_fail = True
break
                        #If no failure occurred, update the data of the run.
if not shot_fail:
shot_canny.append(e)
shot_z_pos.append(i)
shot_iso.append(button.value[1])
shot_shutter.append(button.value[0])
shot_grid_x.append(grid_index_x)
shot_grid_y.append(grid_index_y)
shot_dt.append(dt)
shot_time.append(d1)
camera_files = subprocess.check_output(['gphoto2','--list-files'], universal_newlines = True)
expression = r"DSC_........"
run_files = re.findall(expression,camera_files)
d = {"name":run_files,"grid_x":shot_grid_x,"grid_y":shot_grid_y, "z_pos":shot_z_pos, "canny":shot_canny,\
"iso":shot_iso, "shutter":shot_shutter,"dt":shot_dt,"time":shot_time}
df = | pd.DataFrame(data=d) | pandas.DataFrame |
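# --- Illustrative aside (hypothetical values): the dict-of-columns ->
# DataFrame pattern used for the shot log above, as a tiny standalone sketch.
# Column names follow the dict built in the run loop; the rows are invented.
import pandas as pd

d_demo = {"name": ["DSC_0001.NEF", "DSC_0002.NEF"],
          "grid_x": [0, 0], "grid_y": [0, 1],
          "z_pos": [3, 7], "canny": [120, 95],
          "iso": ["100", "100"], "shutter": ["0.0100s", "0.0125s"]}
df_demo = pd.DataFrame(data=d_demo)
df_demo.to_csv("run_log_demo.csv", index=False)  # hypothetical output path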
import os, datetime
import csv
import pycurl
import sys
import shutil
from openpyxl import load_workbook
import pandas as pd
import download.box
from io import BytesIO
import numpy as np
from download.box import LifespanBox
verbose = True
snapshotdate = datetime.datetime.today().strftime('%m_%d_%Y')
box_temp='/home/petra/UbWinSharedSpace1/boxtemp' #location of local copy of curated data
box = LifespanBox(cache=box_temp)
redcapconfigfile="/home/petra/UbWinSharedSpace1/ccf-nda-behavioral/PycharmToolbox/.boxApp/redcapconfig.csv"
#grab stuff from corrected and curated
#get list of filenames
##########################
#folderlistlabels=['WashU_HCAorBoth','WashU_HCD', 'UCLA_HCAorBoth','UCLA_HCD', 'UMN_HCAorBoth','UMN_HCD', 'MGH_HCAorBoth','Harvard_HCD']
#folderlistnums= [82804729845, 82804015457,82807223120, 82805124019, 82803665867, 82805151056,82761770877, 82803734267]
#Harvard
Harv=82803734267
Harvattn=96013516511
MGH2=82761770877
MGHattn=96148925420
WashUD=82804015457
WashUDattn=96147128675
WashUA=82804729845
WashUAattn=96149947498
UMNA=82803665867
UMNAattn=96153923311
UMND=82805151056
UMNDattn=96155708581
UCLAA=82807223120
UCLAAattn=96154919803
UCLAD=82805124019
UCLADattn=96162759127
harvcleandata, harvcleanscore=curatedandcorrected(Harv,Harvattn)
mghcleandata, mghcleanscore=curatedandcorrected(MGH2,MGHattn)
washudcleandata,washudcleanscore=curatedandcorrected(WashUD,WashUDattn)
washuacleandata,washuacleanscore=curatedandcorrected(WashUA,WashUAattn)
umnacleandata,umnacleanscore=curatedandcorrected(UMNA,UMNAattn)
umndcleandata,umndcleanscore=curatedandcorrected(UMND,UMNDattn)
uclaacleandata,uclaacleanscore=curatedandcorrected(UCLAA,UCLAAattn)
ucladcleandata,ucladcleanscore=curatedandcorrected(UCLAD,UCLADattn)
###stopped here
harvcleandata.to_csv(box_temp+'/Harvard_HCDonly_Toolbox_Raw_Combined_'+snapshotdate+'.csv')
#box.update_file(497579203898,box_temp+'/Harvard_HCDonly_Toolbox_Raw_Combined_'+snapshotdate+'.csv')
harvcleanscore.to_csv(box_temp+'/Harvard_HCDonly_Toolbox_Scored_Combined_'+snapshotdate+'.csv')
#box.update_file(497530866864,box_temp+'/Harvard_HCDonly_Toolbox_Scored_Combined_'+snapshotdate+'.csv')
mghcleandata.to_csv(box_temp+'/MGH_HCAorBoth_Toolbox_Raw_Combined_'+snapshotdate+'.csv')
mghcleanscore.to_csv(box_temp+'/MGH_HCAorBoth_Toolbox_Scored_Combined_'+snapshotdate+'.csv')
#update box files by hand
washudcleandata.to_csv(box_temp+'/WashU_HCDonly_Toolbox_Raw_Combined_'+snapshotdate+'.csv')
washudcleanscore.to_csv(box_temp+'/WashU_HCDonly_Toolbox_Scored_Combined_'+snapshotdate+'.csv')
washuacleandata.to_csv(box_temp+'/WashU_HCAorBoth_Toolbox_Raw_Combined_'+snapshotdate+'.csv')
washuacleanscore.to_csv(box_temp+'/WashU_HCAorBoth_Toolbox_Scored_Combined_'+snapshotdate+'.csv')
umnacleandata.to_csv(box_temp+'/UMN_HCAorBoth_Toolbox_Raw_Combined_'+snapshotdate+'.csv')
umnacleanscore.to_csv(box_temp+'/UMN_HCAorBoth_Toolbox_Scored_Combined_'+snapshotdate+'.csv')
umndcleandata.to_csv(box_temp+'/UMN_HCDonly_Toolbox_Raw_Combined_'+snapshotdate+'.csv')
umndcleanscore.to_csv(box_temp+'/UMN_HCDonly_Toolbox_Scored_Combined_'+snapshotdate+'.csv')
uclaacleandata.to_csv(box_temp+'/UCLA_HCAorBoth_Toolbox_Raw_Combined_'+snapshotdate+'.csv')
uclaacleanscore.to_csv(box_temp+'/UCLA_HCAorBoth_Toolbox_Scored_Combined_'+snapshotdate+'.csv')
ucladcleandata.to_csv(box_temp+'/UCLA_HCDonly_Toolbox_Raw_Combined_'+snapshotdate+'.csv')
ucladcleanscore.to_csv(box_temp+'/UCLA_HCDonly_Toolbox_Scored_Combined_'+snapshotdate+'.csv')
#concatenate cleandata for snapshotdate - putting read_csv here in case not loaded into memory
harvcleandata=pd.read_csv(box_temp+'/Harvard_HCDonly_Toolbox_Raw_Combined_12_12_2019.csv',header=0,low_memory=False)
mghcleandata=pd.read_csv(box_temp+'/MGH_HCAorBoth_Toolbox_Raw_Combined_12_12_2019.csv',header=0,low_memory=False)
washudcleandata=pd.read_csv(box_temp+'/WashU_HCDonly_Toolbox_Raw_Combined_12_12_2019.csv',header=0,low_memory=False)
washuacleandata=pd.read_csv(box_temp+'/WashU_HCAorBoth_Toolbox_Raw_Combined_12_12_2019.csv',header=0,low_memory=False)
umnacleandata=pd.read_csv(box_temp+'/UMN_HCAorBoth_Toolbox_Raw_Combined_12_12_2019.csv',header=0,low_memory=False)
umndcleandata=pd.read_csv(box_temp+'/UMN_HCDonly_Toolbox_Raw_Combined_12_12_2019.csv',header=0,low_memory=False)
uclaacleandata=pd.read_csv(box_temp+'/UCLA_HCAorBoth_Toolbox_Raw_Combined_12_12_2019.csv',header=0,low_memory=False)
ucladcleandata=pd.read_csv(box_temp+'/UCLA_HCDonly_Toolbox_Raw_Combined_12_12_2019.csv',header=0,low_memory=False)
allrawdataHCAorBoth= | pd.concat([mghcleandata,washuacleandata,umnacleandata,uclaacleandata],axis=0) | pandas.concat |
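# --- Illustrative aside (hypothetical frames): row-wise concatenation of
# per-site tables as in pd.concat([...], axis=0) above; the keys= variant is
# only shown to illustrate how row provenance could be kept if wanted.
import pandas as pd

mgh = pd.DataFrame({"subject": ["HCA0001"], "score": [1.0]})
washu = pd.DataFrame({"subject": ["HCA0002"], "score": [2.0]})
combined = pd.concat([mgh, washu], axis=0)                               # plain stack
combined_tagged = pd.concat([mgh, washu], axis=0, keys=["MGH", "WashU"]) # optional provenance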
from __future__ import absolute_import
# PopulationSim
# See full license in LICENSE.txt.
import logging
import pandas as pd
from activitysim.core import inject
from activitysim.core import pipeline
from activitysim.core.config import setting
from ..balancer import do_balancing
from .helper import get_control_table
from .helper import weight_table_name
logger = logging.getLogger(__name__)
@inject.step()
def initial_seed_balancing(settings, crosswalk, control_spec, incidence_table):
"""
Balance the household weights for each of the seed geographies (independently)
using the seed level controls and the aggregated sub-zone controls totals.
Create the seed_weights table with one row per household and columns contaiing
household_id, seed geography (e.g. PUMA), and float preliminary_balanced_weights
Adds seed_weights table to pipeline named <seed_geography>_weights (e.g. PUMA_weights):
+--------+------+-----------------------------+-------+
| index | PUMA | preliminary_balanced_weight | hh_id |
| hh_id | | | |
+========+======+=============================+=======+
| 0 | 600 | 0.313555 | 0 |
| 1 | 601 | 0.627110 | 1 |
| 2 | 602 | 0.313555 | 2 |
| ... | | | |
+--------+------+-----------------------------+-------+
Parameters
----------
settings : dict (settings.yaml as dict)
crosswalk : pipeline table
control_spec : pipeline table
incidence_table : pipeline table
"""
crosswalk_df = crosswalk.to_frame()
incidence_df = incidence_table.to_frame()
control_spec = control_spec.to_frame()
seed_geography = settings.get('seed_geography')
seed_controls_df = get_control_table(seed_geography)
# only want control_spec rows for seed geography and below
geographies = settings['geographies']
seed_geographies = geographies[geographies.index(seed_geography):]
seed_control_spec = control_spec[control_spec['geography'].isin(seed_geographies)]
# determine master_control_index if specified in settings
total_hh_control_col = setting('total_hh_control')
max_expansion_factor = settings.get('max_expansion_factor', None)
min_expansion_factor = settings.get('min_expansion_factor', None)
# run balancer for each seed geography
weight_list = []
sample_weight_list = []
seed_ids = crosswalk_df[seed_geography].unique()
for seed_id in seed_ids:
logger.info("initial_seed_balancing seed id %s" % seed_id)
seed_incidence_df = incidence_df[incidence_df[seed_geography] == seed_id]
status, weights_df, controls_df = do_balancing(
control_spec=seed_control_spec,
total_hh_control_col=total_hh_control_col,
max_expansion_factor=max_expansion_factor,
min_expansion_factor=min_expansion_factor,
incidence_df=seed_incidence_df,
control_totals=seed_controls_df.loc[seed_id],
initial_weights=seed_incidence_df['sample_weight'])
logger.info("seed_balancer status: %s" % status)
if not status['converged']:
raise RuntimeError("initial_seed_balancing for seed_id %s did not converge" % seed_id)
balanced_weights = weights_df['final']
logger.info("Total balanced weights for seed %s = %s" % (seed_id, balanced_weights.sum()))
weight_list.append(balanced_weights)
sample_weight_list.append(seed_incidence_df['sample_weight'])
# bulk concat all seed level results
weights = | pd.concat(weight_list) | pandas.concat |
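# --- Illustrative aside (hypothetical weights): concatenating per-seed weight
# Series into one long Series, mirroring pd.concat(weight_list) above; the
# seed ids and values are invented.
import pandas as pd

w_600 = pd.Series([0.31, 0.63], index=[0, 1])  # balanced weights for one PUMA
w_601 = pd.Series([0.50], index=[2])           # balanced weights for another
weights_demo = pd.concat([w_600, w_601])       # single Series indexed by hh_id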
import pandas as pd
import matplotlib.pyplot as plt
def district_Daily_Status(district,districts):
#################### Getting the Data ###############
data = districts.copy()
daily_Confirmed = -(data[data['District']==district].tail(16).Confirmed - data[data['District']==district].tail(16).Confirmed.shift(-1))
daily_Confirmed= | pd.to_numeric(daily_Confirmed[:-1],downcast="integer") | pandas.to_numeric |
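# --- Illustrative aside (hypothetical series): pd.to_numeric with
# downcast="integer", as used above to turn day-over-day differences of the
# Confirmed column into integers.
import pandas as pd

diffs = pd.Series([12.0, 30.0, 7.0])
daily = pd.to_numeric(diffs, downcast="integer")  # downcast to the smallest int dtype that fits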
"""
Generate fake data for schools infection survey raw input data.
"""
from mimesis.schema import Field, Schema
import pandas as pd
from datetime import datetime, timedelta
from pathlib import Path
_ = Field('en-gb', seed=42)
def generate_survey_schools(directory, file_date, records):
"""
Generate survey schools file.
"""
survey_schools_description = (
lambda: {
'schl_nm': " ".join(_('text.words', quantity=4)).title(),
'schl_post_cde': _('address.postal_code'),
'schl_urn': _('random.custom_code', mask='######', digit='#'),
'studyconsent': _('numbers.integer_number', start=0, end=1),
'schl_child_cnt': _('numbers.integer_number', start=50, end=100),
# Don't think we use individual year counts in the pipeline
'head_teacher_nm': _('full_name'),
'school_telephone_number': _('person.telephone'),
'school_contact_email': _('person.email', domains=['gsnail.ac.uk']),
'information_consent': _('numbers.integer_number', start=0, end=1),
'schl_la_nm': _('address.state'),
'change_indicator': _('numbers.integer_number', start=0, end=1),
'last_change_datetime': _('datetime.formatted_datetime', fmt="%d/%m/%Y %H:%M:%S", start=1800, end=1802),
'record_created_date': _('datetime.formatted_datetime', fmt="%d/%m/%Y %H:%M:%S", start=1800, end=1802),
}
)
schema = Schema(schema=survey_schools_description)
survey_schools = pd.DataFrame(schema.create(iterations=records))
survey_schools.to_csv(directory / f"survey_schools_{file_date}.csv", index=False)
return survey_schools
def generate_survey_participants(directory, file_date, records, school_urns):
"""
Generate survey participants file. Depends on survey schools file.
"""
survey_participants_description = (
lambda: {
'participant_type': _('choice', items=['type_1', 'type_2']),
# Assume we don't need real types, but do need enrolment questions per type
'participant_id': _('random.custom_code', mask='P#########', digit='#'),
'parent_participant_id': _('random.custom_code', mask='P#########', digit='#'),
'participant_first_nm': _('person.first_name'),
'participant_family_name': _('person.last_name'),
'email_addrs': _('person.email', domains=['gsnail.ac.uk']),
'schl_urn': _('choice', items=list(school_urns)),
'consent': _('numbers.integer_number', start=0, end=1),
'change_date': _('datetime.formatted_datetime', fmt="%d/%m/%Y %H:%M:%S", start=1800, end=1802),
'record_created_date': _('datetime.formatted_datetime', fmt="%d/%m/%Y %H:%M:%S", start=1800, end=1802),
}
)
schema = Schema(schema=survey_participants_description)
survey_participants = pd.DataFrame(schema.create(iterations=records))
# Type 2 doesn't have registered parents
survey_participants.loc[survey_participants["participant_type"] == "type_2", "parent_participant_id"] = pd.NA
survey_participants.to_csv(directory / f"survey_participants_{file_date}.csv", index=False)
return survey_participants
def generate_survey_responses(directory, file_date, records, participant_ids, school_ids):
"""
Generate survey responses file. Depends on survey participants and schools files.
"""
survey_responses_description = (
lambda: {
'participant_id': _('choice', items=list(participant_ids)),
'question_id': _('random.custom_code', mask='Q#####', digit='#'),
'question_response_text': _('text.sentence'),
'last_change_datetime': _('datetime.formatted_datetime', fmt="%d/%m/%Y %H:%M:%S", start=1800, end=1802),
'record_created_datetime': _('datetime.formatted_datetime', fmt="%d/%m/%Y %H:%M:%S", start=1800, end=1802)
}
)
schema = Schema(schema=survey_responses_description)
survey_responses = pd.DataFrame(schema.create(iterations=records))
survey_responses.to_csv(directory / f"survey_responses_{file_date}.csv", index=False)
return survey_responses
def generate_lab_swabs(directory, file_date, records):
"""
Generate lab swabs file.
"""
lab_swabs_description = (
lambda: {
'Sample':_('random.custom_code', mask='SIS########', digit='#'),
'Result': _('choice', items=["Positive", "Negative"]),
'Date Tested': _('datetime.formatted_datetime', fmt="%Y-%m-%d %H:%M:%S UTC", start=1800, end=1802),
'Seq-Target': "A gene",
'Seq-Result': _('choice', items=["Positive", "Negative"])
}
)
schema = Schema(schema=lab_swabs_description)
lab_swabs = pd.DataFrame(schema.create(iterations=records))
lab_swabs.to_csv(directory / f"lab_swabs_{file_date}.csv", index=False)
return lab_swabs
def generate_lab_bloods(directory, file_date, records):
"""
Generate lab bloods file.
"""
lab_bloods_description = (
lambda: {
'specimenId': _('random.custom_code', mask='#########THR', digit='#'),
'specimenProcessedDate': _('datetime.formatted_datetime', fmt="%Y-%m-%dT%H:%M:%SZ", start=1800, end=1802),
'testResult': _('choice', items=['Positive', 'Negative'])
}
)
schema = Schema(schema=lab_bloods_description)
lab_bloods = pd.DataFrame(schema.create(iterations=records))
lab_bloods.to_csv(directory / f"lab_bloods_{file_date}.csv", index=False)
return lab_bloods
def generate_lab_saliva(directory, file_date, records):
"""
Generate lab saliva file.
"""
lab_saliva_description = (
lambda: {
'ORDPATNAME': _('random.custom_code', mask='SIS########', digit='#'),
'SAMPLEID': _('random.custom_code', mask='H#########', digit='#'),
'IgG Capture Result': _('choice', items=['#r', '#n', '#e'])
}
)
schema = Schema(schema=lab_saliva_description)
lab_saliva = pd.DataFrame(schema.create(iterations=records))
lab_saliva.to_csv(directory / f"lab_saliva_{file_date}.csv", index=False)
return lab_saliva
def generate_survey_visits(directory, file_date, records, participant_ids, swab_barcodes, blood_barcodes, saliva_barcodes):
"""
Generate survey visits file. Depends on survey participants and schools files.
"""
survey_visits_description = (
lambda: {
'participant_id': _('choice', items=list(participant_ids)),
'visit_date': _('datetime.formatted_datetime', fmt="%Y-%m-%d %H:%M:%S UTC", start=1800, end=1802),
'swab_Sample_barcode': _('choice', items=list(swab_barcodes) + [pd.NA]),
'blood_thriva_barcode': _('choice', items=list(blood_barcodes) + [pd.NA]),
'oral_swab_barcode': _('choice', items=list(saliva_barcodes)+ [pd.NA]),
'last_change_datetime': _('datetime.formatted_datetime', fmt="%d/%m/%Y %H:%M:%S", start=1800, end=1802),
'record_created_datetime': _('datetime.formatted_datetime', fmt="%d/%m/%Y %H:%M:%S", start=1800, end=1802)
}
)
schema = Schema(schema=survey_visits_description)
survey_visits = pd.DataFrame(schema.create(iterations=records))
survey_visits.to_csv(directory / f"survey_visits_{file_date}.csv", index=False)
return survey_visits
def generate_question_lookup(directory, file_date, records, question_ids):
"""
Generate question id to name lookup. Depends on survey responses file.
"""
question_lookup_description = (
lambda: {
'question_id': _('choice', items=question_ids),
'new_variables_names': "_".join(_('text.words', quantity=4)).lower()
}
)
schema = Schema(schema=question_lookup_description)
question_lookup = pd.DataFrame(schema.create(iterations=records))
question_lookup.to_csv(directory / f"question_lookup_{file_date}.csv", index=False)
return question_lookup
if __name__ == "__main__":
raw_dir = Path("raw_schools")
swab_dir = raw_dir / "swab"
blood_dir = raw_dir / "blood"
saliva_dir = raw_dir / "saliva"
survey_dir = raw_dir / "survey"
lookup_dir = raw_dir / "lookup"
for directory in [swab_dir, blood_dir, saliva_dir, survey_dir, lookup_dir]:
directory.mkdir(parents=True, exist_ok=True)
file_date = datetime.strptime("18010101", "%Y%m%d")
lab_date_1 = datetime.strftime(file_date - timedelta(days=1), format="%Y%m%d")
lab_date_2 = datetime.strftime(file_date - timedelta(days=2), format="%Y%m%d")
file_date = datetime.strftime(file_date, format="%Y%m%d")
schools = generate_survey_schools(survey_dir, file_date, 10)
participants = generate_survey_participants(
survey_dir,
file_date,
40,
schools["schl_urn"].unique().tolist()
)
responses = generate_survey_responses(
survey_dir,
file_date,
100,
participants["participant_id"].unique().tolist(),
participants["schl_urn"].unique().tolist()
)
lab_swabs_1 = generate_lab_swabs(swab_dir, file_date, 10)
lab_swabs_2 = generate_lab_swabs(swab_dir, lab_date_1, 10)
lab_swabs_3 = generate_lab_swabs(swab_dir, lab_date_2, 10)
lab_swabs = pd.concat([lab_swabs_1, lab_swabs_2, lab_swabs_3])
lab_bloods_1 = generate_lab_bloods(blood_dir, file_date, 10)
lab_bloods_2 = generate_lab_bloods(blood_dir, lab_date_1, 10)
lab_bloods_3 = generate_lab_bloods(blood_dir, lab_date_2, 10)
lab_bloods = | pd.concat([lab_bloods_1, lab_bloods_2, lab_bloods_3]) | pandas.concat |
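# --- Illustrative aside (hypothetical frames): stacking per-date lab extracts
# as in the pd.concat calls above; ignore_index=True is optional and only
# shown to note that a fresh RangeIndex can be produced.
import pandas as pd

day1 = pd.DataFrame({"specimenId": ["000000001THR"], "testResult": ["Positive"]})
day2 = pd.DataFrame({"specimenId": ["000000002THR"], "testResult": ["Negative"]})
lab_all = pd.concat([day1, day2])                            # keeps original indices
lab_all_reset = pd.concat([day1, day2], ignore_index=True)   # 0..n-1 index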
# -*- coding: utf-8 -*-
import pickle, os
import pandas as pd
from typing import List, Dict
# from sklearn.externals import joblib
from sklearn.tree import DecisionTreeClassifier as DCF
from sklearn.model_selection import train_test_split
from sklearn import metrics
from .normalize import (
column_mapping,
map_columns,
GP_VALUE,
GRADE_VALUE,
uppercase
)
print(os.listdir('.'))
FEATURE_COLUMNS = [
'MATNO',
'MTH101',
'GST101',
'MTH103',
'STA101',
'CSC102',
'MTH102',
'PHY101',
'PHY103'
]
TARGET_COLUMN = [
'FGRADE'
]
COLUMNS = [
*FEATURE_COLUMNS,
*TARGET_COLUMN
]
DATA_FILE = '../results.xlsx'
TEST_SIZE = 0.5
RANDOM_STATE = 1
# file_data = pd.read_excel(DATA_FILE)
def read_file_data(dataset: str) -> pd.DataFrame:
return pd.read_excel(dataset)
def produce_dataframe(file_data: pd.DataFrame, columns: List[str]=None) -> pd.DataFrame:
data_frame = | pd.DataFrame(file_data, columns=columns) | pandas.DataFrame |
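# --- Illustrative aside (hypothetical data, not the project's results file):
# how the pieces above would typically be wired together -- select feature and
# target columns, split, fit the decision tree, and score it.
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn import metrics

demo = pd.DataFrame({
    "MTH101": [4, 2, 5, 1], "GST101": [3, 2, 4, 1], "FGRADE": [1, 0, 1, 0],
})
X = demo[["MTH101", "GST101"]]
y = demo["FGRADE"]
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.5, random_state=1)
clf = DecisionTreeClassifier().fit(X_train, y_train)
print(metrics.accuracy_score(y_test, clf.predict(X_test)))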
import datetime
from datetime import timedelta
from distutils.version import LooseVersion
from io import BytesIO
import os
import re
from warnings import catch_warnings, simplefilter
import numpy as np
import pytest
from pandas.compat import is_platform_little_endian, is_platform_windows
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_categorical_dtype
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Int64Index,
MultiIndex,
RangeIndex,
Series,
Timestamp,
bdate_range,
concat,
date_range,
isna,
timedelta_range,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
create_tempfile,
ensure_clean_path,
ensure_clean_store,
safe_close,
safe_remove,
tables,
)
import pandas.util.testing as tm
from pandas.io.pytables import (
ClosedFileError,
HDFStore,
PossibleDataLossError,
Term,
read_hdf,
)
from pandas.io import pytables as pytables # noqa: E402 isort:skip
from pandas.io.pytables import TableIterator # noqa: E402 isort:skip
_default_compressor = "blosc"
ignore_natural_naming_warning = pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
@pytest.mark.single
class TestHDFStore:
def test_format_kwarg_in_constructor(self, setup_path):
# GH 13291
with ensure_clean_path(setup_path) as path:
with pytest.raises(ValueError):
HDFStore(path, format="table")
def test_context(self, setup_path):
path = create_tempfile(setup_path)
try:
with HDFStore(path) as tbl:
raise ValueError("blah")
except ValueError:
pass
finally:
safe_remove(path)
try:
with HDFStore(path) as tbl:
tbl["a"] = tm.makeDataFrame()
with HDFStore(path) as tbl:
assert len(tbl) == 1
assert type(tbl["a"]) == DataFrame
finally:
safe_remove(path)
def test_conv_read_write(self, setup_path):
path = create_tempfile(setup_path)
try:
def roundtrip(key, obj, **kwargs):
obj.to_hdf(path, key, **kwargs)
return read_hdf(path, key)
o = tm.makeTimeSeries()
tm.assert_series_equal(o, roundtrip("series", o))
o = tm.makeStringSeries()
tm.assert_series_equal(o, roundtrip("string_series", o))
o = tm.makeDataFrame()
tm.assert_frame_equal(o, roundtrip("frame", o))
# table
df = DataFrame(dict(A=range(5), B=range(5)))
df.to_hdf(path, "table", append=True)
result = read_hdf(path, "table", where=["index>2"])
tm.assert_frame_equal(df[df.index > 2], result)
finally:
safe_remove(path)
def test_long_strings(self, setup_path):
# GH6166
df = DataFrame(
{"a": tm.rands_array(100, size=10)}, index=tm.rands_array(100, size=10)
)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=["a"])
result = store.select("df")
tm.assert_frame_equal(df, result)
def test_api(self, setup_path):
# GH4584
# API issue when to_hdf doesn't accept append AND format args
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True)
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True)
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", append=False, format="fixed")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False, format="f")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False)
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_store(setup_path) as store:
path = store._path
df = tm.makeDataFrame()
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=True, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# append to False
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# formats
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format=None)
tm.assert_frame_equal(store.select("df"), df)
with ensure_clean_path(setup_path) as path:
# Invalid.
df = tm.makeDataFrame()
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="f")
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="fixed")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=True, format="foo")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=False, format="bar")
# File path doesn't exist
path = ""
with pytest.raises(FileNotFoundError):
read_hdf(path, "df")
def test_api_default_format(self, setup_path):
# default_format option
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
_maybe_remove(store, "df")
store.put("df", df)
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
store.append("df2", df)
pd.set_option("io.hdf.default_format", "table")
_maybe_remove(store, "df")
store.put("df", df)
assert store.get_storer("df").is_table
_maybe_remove(store, "df2")
store.append("df2", df)
assert store.get_storer("df").is_table
pd.set_option("io.hdf.default_format", None)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
df.to_hdf(path, "df")
with HDFStore(path) as store:
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
df.to_hdf(path, "df2", append=True)
pd.set_option("io.hdf.default_format", "table")
df.to_hdf(path, "df3")
with HDFStore(path) as store:
assert store.get_storer("df3").is_table
df.to_hdf(path, "df4", append=True)
with HDFStore(path) as store:
assert store.get_storer("df4").is_table
pd.set_option("io.hdf.default_format", None)
def test_keys(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
assert len(store) == 3
expected = {"/a", "/b", "/c"}
assert set(store.keys()) == expected
assert set(store) == expected
def test_keys_ignore_hdf_softlink(self, setup_path):
# GH 20523
# Puts a softlink into HDF file and rereads
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A=range(5), B=range(5)))
store.put("df", df)
assert store.keys() == ["/df"]
store._handle.create_soft_link(store._handle.root, "symlink", "df")
# Should ignore the softlink
assert store.keys() == ["/df"]
def test_iter_empty(self, setup_path):
with ensure_clean_store(setup_path) as store:
# GH 12221
assert list(store) == []
def test_repr(self, setup_path):
with ensure_clean_store(setup_path) as store:
repr(store)
store.info()
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store["df"] = df
# make a random group in hdf space
store._handle.create_group(store._handle.root, "bah")
assert store.filename in repr(store)
assert store.filename in str(store)
store.info()
# storers
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df)
s = store.get_storer("df")
repr(s)
str(s)
@ignore_natural_naming_warning
def test_contains(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
store["foo/bar"] = tm.makeDataFrame()
assert "a" in store
assert "b" in store
assert "c" not in store
assert "foo/bar" in store
assert "/foo/bar" in store
assert "/foo/b" not in store
assert "bar" not in store
# gh-2694: tables.NaturalNameWarning
with catch_warnings(record=True):
store["node())"] = tm.makeDataFrame()
assert "node())" in store
def test_versioning(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
assert store.root.a._v_attrs.pandas_version == "0.15.2"
assert store.root.b._v_attrs.pandas_version == "0.15.2"
assert store.root.df1._v_attrs.pandas_version == "0.15.2"
# write a file and wipe its versioning
_maybe_remove(store, "df2")
store.append("df2", df)
# this is an error because its table_type is appendable, but no
# version info
store.get_node("df2")._v_attrs.pandas_version = None
with pytest.raises(Exception):
store.select("df2")
def test_mode(self, setup_path):
df = tm.makeTimeDataFrame()
def check(mode):
with ensure_clean_path(setup_path) as path:
# constructor
if mode in ["r", "r+"]:
with pytest.raises(IOError):
HDFStore(path, mode=mode)
else:
store = HDFStore(path, mode=mode)
assert store._handle.mode == mode
store.close()
with ensure_clean_path(setup_path) as path:
# context
if mode in ["r", "r+"]:
with pytest.raises(IOError):
with HDFStore(path, mode=mode) as store: # noqa
pass
else:
with HDFStore(path, mode=mode) as store:
assert store._handle.mode == mode
with ensure_clean_path(setup_path) as path:
# conv write
if mode in ["r", "r+"]:
with pytest.raises(IOError):
df.to_hdf(path, "df", mode=mode)
df.to_hdf(path, "df", mode="w")
else:
df.to_hdf(path, "df", mode=mode)
# conv read
if mode in ["w"]:
with pytest.raises(ValueError):
read_hdf(path, "df", mode=mode)
else:
result = read_hdf(path, "df", mode=mode)
tm.assert_frame_equal(result, df)
def check_default_mode():
# read_hdf uses default mode
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="w")
result = read_hdf(path, "df")
tm.assert_frame_equal(result, df)
check("r")
check("r+")
check("a")
check("w")
check_default_mode()
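# --- Illustrative sketch (not part of the original test suite) ---
# Summarizes the open-mode behaviour exercised above: "w" creates/truncates,
# "a" adds keys to an existing file, and "r"/"r+" require the file to exist.
# The temporary path handling is a hypothetical example, not fixture-backed.
def _sketch_hdfstore_modes():
    import os
    import tempfile
    import pandas as pd

    path = os.path.join(tempfile.mkdtemp(), "sketch_modes.h5")
    df = pd.DataFrame({"A": range(3)})

    df.to_hdf(path, "df", mode="w")   # create or truncate the file
    df.to_hdf(path, "df2", mode="a")  # add another key, keep existing ones
    result = pd.read_hdf(path, "df", mode="r")  # read-only access
    assert len(result) == 3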
def test_reopen_handle(self, setup_path):
with ensure_clean_path(setup_path) as path:
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# invalid mode change
with pytest.raises(PossibleDataLossError):
store.open("w")
store.close()
assert not store.is_open
# truncation ok here
store.open("w")
assert store.is_open
assert len(store) == 0
store.close()
assert not store.is_open
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# reopen as read
store.open("r")
assert store.is_open
assert len(store) == 1
assert store._mode == "r"
store.close()
assert not store.is_open
# reopen as append
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
# reopen as append (again)
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
def test_open_args(self, setup_path):
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
# create an in memory store
store = HDFStore(
path, mode="a", driver="H5FD_CORE", driver_core_backing_store=0
)
store["df"] = df
store.append("df2", df)
tm.assert_frame_equal(store["df"], df)
tm.assert_frame_equal(store["df2"], df)
store.close()
# the file should not have actually been written
assert not os.path.exists(path)
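# --- Illustrative sketch (not part of the original test suite) ---
# Uses the same driver arguments as the test above: H5FD_CORE with the
# backing store disabled keeps the store entirely in memory, so nothing is
# ever written to `path`. The path itself is a hypothetical example.
def _sketch_in_memory_store():
    import os
    import tempfile
    import pandas as pd

    path = os.path.join(tempfile.mkdtemp(), "sketch_core_driver.h5")
    df = pd.DataFrame({"A": range(3)})
    store = pd.HDFStore(
        path, mode="a", driver="H5FD_CORE", driver_core_backing_store=0
    )
    store["df"] = df
    store.close()
    assert not os.path.exists(path)  # the file never hits disk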
def test_flush(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store.flush()
store.flush(fsync=True)
def test_get(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
left = store.get("a")
right = store["a"]
tm.assert_series_equal(left, right)
left = store.get("/a")
right = store["/a"]
tm.assert_series_equal(left, right)
with pytest.raises(KeyError, match="'No object named b in the file'"):
store.get("b")
@pytest.mark.parametrize(
"where, expected",
[
(
"/",
{
"": ({"first_group", "second_group"}, set()),
"/first_group": (set(), {"df1", "df2"}),
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
(
"/second_group",
{
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
],
)
def test_walk(self, where, expected, setup_path):
# GH10143
objs = {
"df1": pd.DataFrame([1, 2, 3]),
"df2": pd.DataFrame([4, 5, 6]),
"df3": pd.DataFrame([6, 7, 8]),
"df4": pd.DataFrame([9, 10, 11]),
"s1": pd.Series([10, 9, 8]),
# Next 3 items aren't pandas objects and should be ignored
"a1": np.array([[1, 2, 3], [4, 5, 6]]),
"tb1": np.array([(1, 2, 3), (4, 5, 6)], dtype="i,i,i"),
"tb2": np.array([(7, 8, 9), (10, 11, 12)], dtype="i,i,i"),
}
with ensure_clean_store("walk_groups.hdf", mode="w") as store:
store.put("/first_group/df1", objs["df1"])
store.put("/first_group/df2", objs["df2"])
store.put("/second_group/df3", objs["df3"])
store.put("/second_group/s1", objs["s1"])
store.put("/second_group/third_group/df4", objs["df4"])
# Create non-pandas objects
store._handle.create_array("/first_group", "a1", objs["a1"])
store._handle.create_table("/first_group", "tb1", obj=objs["tb1"])
store._handle.create_table("/second_group", "tb2", obj=objs["tb2"])
assert len(list(store.walk(where=where))) == len(expected)
for path, groups, leaves in store.walk(where=where):
assert path in expected
expected_groups, expected_frames = expected[path]
assert expected_groups == set(groups)
assert expected_frames == set(leaves)
for leaf in leaves:
frame_path = "/".join([path, leaf])
obj = store.get(frame_path)
if "df" in leaf:
tm.assert_frame_equal(obj, objs[leaf])
else:
tm.assert_series_equal(obj, objs[leaf])
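# --- Illustrative sketch (not part of the original test suite) ---
# Minimal use of HDFStore.walk(): it yields (path, subgroups, leaves) for
# each group and skips nodes that are not pandas objects. The group and key
# names below are hypothetical.
def _sketch_walk_groups():
    import os
    import tempfile
    import pandas as pd

    path = os.path.join(tempfile.mkdtemp(), "sketch_walk.h5")
    with pd.HDFStore(path, mode="w") as store:
        store.put("/grp1/df1", pd.DataFrame({"A": [1, 2, 3]}))
        store.put("/grp1/sub/df2", pd.DataFrame({"B": [4, 5, 6]}))
        for group_path, groups, leaves in store.walk():
            for leaf in leaves:
                obj = store.get("/".join([group_path, leaf]))
                assert isinstance(obj, pd.DataFrame)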
def test_getattr(self, setup_path):
with ensure_clean_store(setup_path) as store:
s = tm.makeTimeSeries()
store["a"] = s
# test attribute access
result = store.a
tm.assert_series_equal(result, s)
result = getattr(store, "a")
tm.assert_series_equal(result, s)
df = tm.makeTimeDataFrame()
store["df"] = df
result = store.df
tm.assert_frame_equal(result, df)
# errors
for x in ["d", "mode", "path", "handle", "complib"]:
with pytest.raises(AttributeError):
getattr(store, x)
# not stores
for x in ["mode", "path", "handle", "complib"]:
getattr(store, "_{x}".format(x=x))
def test_put(self, setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeTimeDataFrame()
store["a"] = ts
store["b"] = df[:10]
store["foo/bar/bah"] = df[:10]
store["foo"] = df[:10]
store["/foo"] = df[:10]
store.put("c", df[:10], format="table")
# not OK, not a table
with pytest.raises(ValueError):
store.put("b", df[10:], append=True)
# node does not currently exist, test _is_table_type returns False
# in this case
_maybe_remove(store, "f")
with pytest.raises(ValueError):
store.put("f", df[10:], append=True)
# can't put to a table (use append instead)
with pytest.raises(ValueError):
store.put("c", df[10:], append=True)
# overwrite table
store.put("c", df[:10], format="table", append=False)
tm.assert_frame_equal(df[:10], store["c"])
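# --- Illustrative sketch (not part of the original test suite) ---
# Contrasts put() and append() as exercised above: put() overwrites a key
# (and fixed-format keys cannot be appended to), while format="table" plus
# append() grows an existing table. Keys below are hypothetical.
def _sketch_put_vs_append():
    import os
    import tempfile
    import pandas as pd

    path = os.path.join(tempfile.mkdtemp(), "sketch_put_append.h5")
    df = pd.DataFrame({"A": range(10)})
    with pd.HDFStore(path, mode="w") as store:
        store.put("fixed_key", df[:5])                  # fixed format: whole-object overwrite
        store.put("table_key", df[:5], format="table")  # table format: appendable
        store.append("table_key", df[5:])
        assert len(store["table_key"]) == 10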
def test_put_string_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
index = Index(
["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(20), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
# mixed length
index = Index(
["abcdefghijklmnopqrstuvwxyz1234567890"]
+ ["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(21), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
def test_put_compression(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
store.put("c", df, format="table", complib="zlib")
tm.assert_frame_equal(store["c"], df)
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="zlib")
@td.skip_if_windows_python_3
def test_put_compression_blosc(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="blosc")
store.put("c", df, format="table", complib="blosc")
tm.assert_frame_equal(store["c"], df)
def test_complibs_default_settings(self, setup_path):
# GH15943
df = tm.makeDataFrame()
# Set complevel and check if complib is automatically set to
# default value
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complevel=9)
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "zlib"
# Set complib and check to see if compression is disabled
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complib="zlib")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if not setting complib or complevel results in no compression
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if file-defaults can be overridden on a per table basis
with ensure_clean_path(setup_path) as tmpfile:
store = pd.HDFStore(tmpfile)
store.append("dfc", df, complevel=9, complib="blosc")
store.append("df", df)
store.close()
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
for node in h5file.walk_nodes(where="/dfc", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "blosc"
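# --- Illustrative sketch (not part of the original test suite) ---
# Shows the compression defaults checked above: complevel alone implies the
# default "zlib" codec, while complib can be set explicitly per call. Assumes
# the blosc filter is available in the local PyTables build; keys are
# hypothetical.
def _sketch_compression_settings():
    import os
    import tempfile
    import pandas as pd

    path = os.path.join(tempfile.mkdtemp(), "sketch_complibs.h5")
    df = pd.DataFrame({"A": range(100)})
    df.to_hdf(path, "zlib_key", complevel=9)                    # default codec, level 9
    df.to_hdf(path, "blosc_key", complevel=5, complib="blosc")  # explicit codec
    result = pd.read_hdf(path, "blosc_key")
    assert len(result) == 100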
def test_complibs(self, setup_path):
# GH14478
df = tm.makeDataFrame()
# Building list of all complibs and complevels tuples
all_complibs = tables.filters.all_complibs
# Remove lzo if it's not available on this platform
if not tables.which_lib_version("lzo"):
all_complibs.remove("lzo")
# Remove bzip2 if it's not available on this platform
if not tables.which_lib_version("bzip2"):
all_complibs.remove("bzip2")
all_levels = range(0, 10)
all_tests = [(lib, lvl) for lib in all_complibs for lvl in all_levels]
for (lib, lvl) in all_tests:
with ensure_clean_path(setup_path) as tmpfile:
gname = "foo"
# Write and read file to see if data is consistent
df.to_hdf(tmpfile, gname, complib=lib, complevel=lvl)
result = pd.read_hdf(tmpfile, gname)
tm.assert_frame_equal(result, df)
# Open file and check metadata
# for correct amount of compression
h5table = tables.open_file(tmpfile, mode="r")
for node in h5table.walk_nodes(where="/" + gname, classname="Leaf"):
assert node.filters.complevel == lvl
if lvl == 0:
assert node.filters.complib is None
else:
assert node.filters.complib == lib
h5table.close()
def test_put_integer(self, setup_path):
# non-date, non-string index
df = DataFrame(np.random.randn(50, 100))
self._check_roundtrip(df, tm.assert_frame_equal, setup_path)
@td.xfail_non_writeable
def test_put_mixed_type(self, setup_path):
df = tm.makeTimeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
# PerformanceWarning
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store.put("df", df)
expected = store.get("df")
tm.assert_frame_equal(expected, df)
@pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
def test_append(self, setup_path):
with ensure_clean_store(setup_path) as store:
# this is allowed, but you almost always don't want to do it
# tables.NaturalNameWarning):
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
_maybe_remove(store, "df2")
store.put("df2", df[:10], format="table")
store.append("df2", df[10:])
tm.assert_frame_equal(store["df2"], df)
_maybe_remove(store, "df3")
store.append("/df3", df[:10])
store.append("/df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
# this is allowed, but you almost always don't want to do it
# tables.NaturalNameWarning
_maybe_remove(store, "/df3 foo")
store.append("/df3 foo", df[:10])
store.append("/df3 foo", df[10:])
tm.assert_frame_equal(store["df3 foo"], df)
# dtype issues - mixed type in a single object column
df = DataFrame(data=[[1, 2], [0, 1], [1, 2], [0, 0]])
df["mixed_column"] = "testing"
df.loc[2, "mixed_column"] = np.nan
_maybe_remove(store, "df")
store.append("df", df)
tm.assert_frame_equal(store["df"], df)
# uints - test storage of uints
uint_data = DataFrame(
{
"u08": Series(
np.random.randint(0, high=255, size=5), dtype=np.uint8
),
"u16": Series(
np.random.randint(0, high=65535, size=5), dtype=np.uint16
),
"u32": Series(
np.random.randint(0, high=2 ** 30, size=5), dtype=np.uint32
),
"u64": Series(
[2 ** 58, 2 ** 59, 2 ** 60, 2 ** 61, 2 ** 62],
dtype=np.uint64,
),
},
index=np.arange(5),
)
_maybe_remove(store, "uints")
store.append("uints", uint_data)
tm.assert_frame_equal(store["uints"], uint_data)
# uints - test storage of uints in indexable columns
_maybe_remove(store, "uints")
# 64-bit indices not yet supported
store.append("uints", uint_data, data_columns=["u08", "u16", "u32"])
tm.assert_frame_equal(store["uints"], uint_data)
def test_append_series(self, setup_path):
with ensure_clean_store(setup_path) as store:
# basic
ss = tm.makeStringSeries()
ts = tm.makeTimeSeries()
ns = Series(np.arange(100))
store.append("ss", ss)
result = store["ss"]
tm.assert_series_equal(result, ss)
assert result.name is None
store.append("ts", ts)
result = store["ts"]
tm.assert_series_equal(result, ts)
assert result.name is None
ns.name = "foo"
store.append("ns", ns)
result = store["ns"]
tm.assert_series_equal(result, ns)
assert result.name == ns.name
# select on the values
expected = ns[ns > 60]
result = store.select("ns", "foo>60")
tm.assert_series_equal(result, expected)
# select on the index and values
expected = ns[(ns > 70) & (ns.index < 90)]
result = store.select("ns", "foo>70 and index<90")
tm.assert_series_equal(result, expected)
# multi-index
mi = DataFrame(np.random.randn(5, 1), columns=["A"])
mi["B"] = np.arange(len(mi))
mi["C"] = "foo"
mi.loc[3:5, "C"] = "bar"
mi.set_index(["C", "B"], inplace=True)
s = mi.stack()
s.index = s.index.droplevel(2)
store.append("mi", s)
tm.assert_series_equal(store["mi"], s)
def test_store_index_types(self, setup_path):
# GH5386
# test storing various index types
with ensure_clean_store(setup_path) as store:
def check(format, index):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df.index = index(len(df))
_maybe_remove(store, "df")
store.put("df", df, format=format)
tm.assert_frame_equal(df, store["df"])
for index in [
tm.makeFloatIndex,
tm.makeStringIndex,
tm.makeIntIndex,
tm.makeDateIndex,
]:
check("table", index)
check("fixed", index)
# period index currently broken for table
# see GH7796 FIXME
check("fixed", tm.makePeriodIndex)
# check('table',tm.makePeriodIndex)
# unicode
index = tm.makeUnicodeIndex
check("table", index)
check("fixed", index)
@pytest.mark.skipif(
not is_platform_little_endian(), reason="platform is not little endian"
)
def test_encoding(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A="foo", B="bar"), index=range(5))
df.loc[2, "A"] = np.nan
df.loc[3, "B"] = np.nan
_maybe_remove(store, "df")
store.append("df", df, encoding="ascii")
tm.assert_frame_equal(store["df"], df)
expected = df.reindex(columns=["A"])
result = store.select("df", Term("columns=A", encoding="ascii"))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"val",
[
[b"E\xc9, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"a", b"b", b"c"],
[b"EE, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"\xf8\xfc", b"a", b"b", b"c"],
[b"", b"a", b"b", b"c"],
[b"\xf8\xfc", b"a", b"b", b"c"],
[b"A\xf8\xfc", b"", b"a", b"b", b"c"],
[np.nan, b"", b"b", b"c"],
[b"A\xf8\xfc", np.nan, b"", b"b", b"c"],
],
)
@pytest.mark.parametrize("dtype", ["category", object])
def test_latin_encoding(self, setup_path, dtype, val):
enc = "latin-1"
nan_rep = ""
key = "data"
val = [x.decode(enc) if isinstance(x, bytes) else x for x in val]
ser = pd.Series(val, dtype=dtype)
with ensure_clean_path(setup_path) as store:
ser.to_hdf(store, key, format="table", encoding=enc, nan_rep=nan_rep)
retr = read_hdf(store, key)
s_nan = ser.replace(nan_rep, np.nan)
if is_categorical_dtype(s_nan):
assert is_categorical_dtype(retr)
tm.assert_series_equal(
s_nan, retr, check_dtype=False, check_categorical=False
)
else:
tm.assert_series_equal(s_nan, retr)
# FIXME: don't leave commented-out
# fails:
# for x in examples:
# roundtrip(s, nan_rep=b'\xf8\xfc')
def test_append_some_nans(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{
"A": Series(np.random.randn(20)).astype("int32"),
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime.datetime(2001, 1, 2, 0, 0),
},
index=np.arange(20),
)
# some nans
_maybe_remove(store, "df1")
df.loc[0:15, ["A1", "B", "D", "E"]] = np.nan
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
# first column
df1 = df.copy()
df1.loc[:, "A1"] = np.nan
_maybe_remove(store, "df1")
store.append("df1", df1[:10])
store.append("df1", df1[10:])
tm.assert_frame_equal(store["df1"], df1)
# 2nd column
df2 = df.copy()
df2.loc[:, "A2"] = np.nan
_maybe_remove(store, "df2")
store.append("df2", df2[:10])
store.append("df2", df2[10:])
tm.assert_frame_equal(store["df2"], df2)
# datetimes
df3 = df.copy()
df3.loc[:, "E"] = np.nan
_maybe_remove(store, "df3")
store.append("df3", df3[:10])
store.append("df3", df3[10:])
tm.assert_frame_equal(store["df3"], df3)
def test_append_all_nans(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{"A1": np.random.randn(20), "A2": np.random.randn(20)},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
# nan some entire rows (dropna=True)
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df[-4:])
# nan some entire rows (dropna=False)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# tests the option io.hdf.dropna_table
pd.set_option("io.hdf.dropna_table", False)
_maybe_remove(store, "df3")
store.append("df3", df[:10])
store.append("df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
pd.set_option("io.hdf.dropna_table", True)
_maybe_remove(store, "df4")
store.append("df4", df[:10])
store.append("df4", df[10:])
tm.assert_frame_equal(store["df4"], df[-4:])
# nan some entire rows (strings are still written!)
df = DataFrame(
{
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# nan some entire rows (but since we have dates they are still
# written!)
df = DataFrame(
{
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime.datetime(2001, 1, 2, 0, 0),
},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# Test that the default is to not drop rows (GH 9382)
df_with_missing = DataFrame(
{"col1": [0, np.nan, 2], "col2": [1, np.nan, np.nan]}
)
with ensure_clean_path(setup_path) as path:
df_with_missing.to_hdf(path, "df_with_missing", format="table")
reloaded = read_hdf(path, "df_with_missing")
tm.assert_frame_equal(df_with_missing, reloaded)
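# --- Illustrative sketch (not part of the original test suite) ---
# Shows the dropna behaviour checked above: dropna=True skips all-NaN rows on
# append, while dropna=False (and the to_hdf default, per GH 9382) keeps
# them. Keys below are hypothetical.
def _sketch_append_dropna():
    import os
    import tempfile
    import numpy as np
    import pandas as pd

    path = os.path.join(tempfile.mkdtemp(), "sketch_dropna.h5")
    df = pd.DataFrame({"A": [1.0, np.nan, 3.0], "B": [4.0, np.nan, 6.0]})
    with pd.HDFStore(path, mode="w") as store:
        store.append("kept", df, dropna=False)    # all rows survive
        store.append("dropped", df, dropna=True)  # the all-NaN row is skipped
        assert len(store["kept"]) == 3
        assert len(store["dropped"]) == 2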
def test_read_missing_key_close_store(self, setup_path):
# GH 25766
with ensure_clean_path(setup_path) as path:
df = pd.DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
pd.read_hdf(path, "k2")
# smoke test to test that file is properly closed after
# read with KeyError before another write
df.to_hdf(path, "k2")
def test_read_missing_key_opened_store(self, setup_path):
# GH 28699
with ensure_clean_path(setup_path) as path:
df = pd.DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
store = pd.HDFStore(path, "r")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
pd.read_hdf(store, "k2")
# Test that the file is still open after a KeyError and that we can
# still read from it.
pd.read_hdf(store, "k1")
def test_append_frame_column_oriented(self, setup_path):
with ensure_clean_store(setup_path) as store:
# column oriented
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df.iloc[:, :2], axes=["columns"])
store.append("df1", df.iloc[:, 2:])
tm.assert_frame_equal(store["df1"], df)
result = store.select("df1", "columns=A")
expected = df.reindex(columns=["A"])
tm.assert_frame_equal(expected, result)
# selection on the non-indexable
result = store.select("df1", ("columns=A", "index=df.index[0:4]"))
expected = df.reindex(columns=["A"], index=df.index[0:4])
tm.assert_frame_equal(expected, result)
# this isn't supported
with pytest.raises(TypeError):
store.select("df1", "columns=A and index>df.index[4]")
def test_append_with_different_block_ordering(self, setup_path):
# GH 4096; using same frames, but different block orderings
with ensure_clean_store(setup_path) as store:
for i in range(10):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df["index"] = range(10)
df["index"] += i * 10
df["int64"] = Series([1] * len(df), dtype="int64")
df["int16"] = Series([1] * len(df), dtype="int16")
if i % 2 == 0:
del df["int64"]
df["int64"] = Series([1] * len(df), dtype="int64")
if i % 3 == 0:
a = df.pop("A")
df["A"] = a
df.set_index("index", inplace=True)
store.append("df", df)
# test a different ordering but with more fields (like invalid
# combination)
with ensure_clean_store(setup_path) as store:
df = DataFrame(np.random.randn(10, 2), columns=list("AB"), dtype="float64")
df["int64"] = Series([1] * len(df), dtype="int64")
df["int16"] = Series([1] * len(df), dtype="int16")
store.append("df", df)
# store additional fields in different blocks
df["int16_2"] = Series([1] * len(df), dtype="int16")
with pytest.raises(ValueError):
store.append("df", df)
# store multiple additional fields in different blocks
df["float_3"] = Series([1.0] * len(df), dtype="float64")
with pytest.raises(ValueError):
store.append("df", df)
def test_append_with_strings(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
# avoid truncation on elements
df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
store.append("df_big", df)
tm.assert_frame_equal(store.select("df_big"), df)
check_col("df_big", "values_block_1", 15)
# appending smaller string ok
df2 = DataFrame([[124, "asdqy"], [346, "dggnhefbdfb"]])
store.append("df_big", df2)
expected = concat([df, df2])
tm.assert_frame_equal(store.select("df_big"), expected)
check_col("df_big", "values_block_1", 15)
# avoid truncation on elements
df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
store.append("df_big2", df, min_itemsize={"values": 50})
tm.assert_frame_equal(store.select("df_big2"), df)
check_col("df_big2", "values_block_1", 50)
# bigger string on next append
store.append("df_new", df)
df_new = DataFrame(
[[124, "abcdefqhij"], [346, "abcdefghijklmnopqrtsuvwxyz"]]
)
with pytest.raises(ValueError):
store.append("df_new", df_new)
# min_itemsize on Series index (GH 11412)
df = tm.makeMixedDataFrame().set_index("C")
store.append("ss", df["B"], min_itemsize={"index": 4})
tm.assert_series_equal(store.select("ss"), df["B"])
# same as above, with data_columns=True
store.append(
"ss2", df["B"], data_columns=True, min_itemsize={"index": 4}
)
tm.assert_series_equal(store.select("ss2"), df["B"])
# min_itemsize in index without appending (GH 10381)
store.put("ss3", df, format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
store.append("ss3", df2)
tm.assert_frame_equal(store.select("ss3"), pd.concat([df, df2]))
# same as above, with a Series
store.put("ss4", df["B"], format="table", min_itemsize={"index": 6})
store.append("ss4", df2["B"])
tm.assert_series_equal(
store.select("ss4"), pd.concat([df["B"], df2["B"]])
)
# with nans
_maybe_remove(store, "df")
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[1:4, "string"] = np.nan
df["string2"] = "bar"
df.loc[4:8, "string2"] = np.nan
df["string3"] = "bah"
df.loc[1:, "string3"] = np.nan
store.append("df", df)
result = store.select("df")
tm.assert_frame_equal(result, df)
with ensure_clean_store(setup_path) as store:
def check_col(key, name, size):
assert getattr(
store.get_storer(key).table.description, name
).itemsize == size
df = DataFrame(dict(A="foo", B="bar"), index=range(10))
# a min_itemsize that creates a data_column
_maybe_remove(store, "df")
store.append("df", df, min_itemsize={"A": 200})
check_col("df", "A", 200)
assert store.get_storer("df").data_columns == ["A"]
# a min_itemsize that creates a data_column2
_maybe_remove(store, "df")
store.append("df", df, data_columns=["B"], min_itemsize={"A": 200})
check_col("df", "A", 200)
assert store.get_storer("df").data_columns == ["B", "A"]
# a min_itemsize applied to the values block, alongside a data_column
_maybe_remove(store, "df")
store.append("df", df, data_columns=["B"], min_itemsize={"values": 200})
check_col("df", "B", 200)
check_col("df", "values_block_0", 200)
assert store.get_storer("df").data_columns == ["B"]
# infer the .typ on subsequent appends
_maybe_remove(store, "df")
store.append("df", df[:5], min_itemsize=200)
store.append("df", df[5:], min_itemsize=200)
tm.assert_frame_equal(store["df"], df)
# invalid min_itemsize keys
df = DataFrame(["foo", "foo", "foo", "barh", "barh", "barh"], columns=["A"])
_maybe_remove(store, "df")
with pytest.raises(ValueError):
store.append("df", df, min_itemsize={"foo": 20, "foobar": 20})
def test_append_with_empty_string(self, setup_path):
with ensure_clean_store(setup_path) as store:
# with all empty strings (GH 12242)
df = DataFrame({"x": ["a", "b", "c", "d", "e", "f", ""]})
store.append("df", df[:-1], min_itemsize={"x": 1})
store.append("df", df[-1:], min_itemsize={"x": 1})
tm.assert_frame_equal(store.select("df"), df)
def test_to_hdf_with_min_itemsize(self, setup_path):
with ensure_clean_path(setup_path) as path:
# min_itemsize in index with to_hdf (GH 10381)
df = tm.makeMixedDataFrame().set_index("C")
df.to_hdf(path, "ss3", format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
df2.to_hdf(path, "ss3", append=True, format="table")
tm.assert_frame_equal(pd.read_hdf(path, "ss3"), pd.concat([df, df2]))
# same as above, with a Series
df["B"].to_hdf(path, "ss4", format="table", min_itemsize={"index": 6})
df2["B"].to_hdf(path, "ss4", append=True, format="table")
tm.assert_series_equal(
pd.read_hdf(path, "ss4"), pd.concat([df["B"], df2["B"]])
)
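# --- Illustrative sketch (not part of the original test suite) ---
# min_itemsize reserves string-column width up front so later appends with
# longer values do not fail. The 20-character budget and key name are
# arbitrary examples.
def _sketch_min_itemsize():
    import os
    import tempfile
    import pandas as pd

    path = os.path.join(tempfile.mkdtemp(), "sketch_min_itemsize.h5")
    short = pd.DataFrame({"x": ["a", "b"]})
    longer = pd.DataFrame({"x": ["a much longer value"]})
    short.to_hdf(path, "df", format="table", min_itemsize={"x": 20})
    longer.to_hdf(path, "df", append=True, format="table")
    assert len(pd.read_hdf(path, "df")) == 3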
@pytest.mark.parametrize(
"format", [pytest.param("fixed", marks=td.xfail_non_writeable), "table"]
)
def test_to_hdf_errors(self, format, setup_path):
data = ["\ud800foo"]
ser = pd.Series(data, index=pd.Index(data))
with ensure_clean_path(setup_path) as path:
# GH 20835
ser.to_hdf(path, "table", format=format, errors="surrogatepass")
result = pd.read_hdf(path, "table", errors="surrogatepass")
tm.assert_series_equal(result, ser)
def test_append_with_data_columns(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
df.iloc[0, df.columns.get_loc("B")] = 1.0
_maybe_remove(store, "df")
store.append("df", df[:2], data_columns=["B"])
store.append("df", df[2:])
tm.assert_frame_equal(store["df"], df)
# check that we have indices created
assert store._handle.root.df.table.cols.index.is_indexed is True
assert store._handle.root.df.table.cols.B.is_indexed is True
# data column searching
result = store.select("df", "B>0")
expected = df[df.B > 0]
tm.assert_frame_equal(result, expected)
# data column searching (with an indexable and a data_columns)
result = store.select("df", "B>0 and index>df.index[3]")
df_new = df.reindex(index=df.index[4:])
expected = df_new[df_new.B > 0]
tm.assert_frame_equal(result, expected)
# data column selection with a string data_column
df_new = df.copy()
df_new["string"] = "foo"
df_new.loc[1:4, "string"] = np.nan
df_new.loc[5:6, "string"] = "bar"
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["string"])
result = store.select("df", "string='foo'")
expected = df_new[df_new.string == "foo"]
tm.assert_frame_equal(result, expected)
# using min_itemsize and a data column
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
store.append(
"df", df_new, data_columns=["string"], min_itemsize={"string": 30}
)
check_col("df", "string", 30)
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["string"], min_itemsize=30)
check_col("df", "string", 30)
_maybe_remove(store, "df")
store.append(
"df", df_new, data_columns=["string"], min_itemsize={"values": 30}
)
check_col("df", "string", 30)
with ensure_clean_store(setup_path) as store:
df_new["string2"] = "foobarbah"
df_new["string_block1"] = "foobarbah1"
df_new["string_block2"] = "foobarbah2"
_maybe_remove(store, "df")
store.append(
"df",
df_new,
data_columns=["string", "string2"],
min_itemsize={"string": 30, "string2": 40, "values": 50},
)
check_col("df", "string", 30)
check_col("df", "string2", 40)
check_col("df", "values_block_1", 50)
with ensure_clean_store(setup_path) as store:
# multiple data columns
df_new = df.copy()
df_new.iloc[0, df_new.columns.get_loc("A")] = 1.0
df_new.iloc[0, df_new.columns.get_loc("B")] = -1.0
df_new["string"] = "foo"
sl = df_new.columns.get_loc("string")
df_new.iloc[1:4, sl] = np.nan
df_new.iloc[5:6, sl] = "bar"
df_new["string2"] = "foo"
sl = df_new.columns.get_loc("string2")
df_new.iloc[2:5, sl] = np.nan
df_new.iloc[7:8, sl] = "bar"
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["A", "B", "string", "string2"])
result = store.select(
"df", "string='foo' and string2='foo' and A>0 and B<0"
)
expected = df_new[
(df_new.string == "foo")
& (df_new.string2 == "foo")
& (df_new.A > 0)
& (df_new.B < 0)
]
tm.assert_frame_equal(result, expected, check_index_type=False)
# yield an empty frame
result = store.select("df", "string='foo' and string2='cool'")
expected = df_new[(df_new.string == "foo") & (df_new.string2 == "cool")]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(setup_path) as store:
# doc example
df_dc = df.copy()
df_dc["string"] = "foo"
df_dc.loc[4:6, "string"] = np.nan
df_dc.loc[7:9, "string"] = "bar"
df_dc["string2"] = "cool"
df_dc["datetime"] = Timestamp("20010102")
df_dc = df_dc._convert(datetime=True)
df_dc.loc[3:5, ["A", "B", "datetime"]] = np.nan
_maybe_remove(store, "df_dc")
store.append(
"df_dc", df_dc, data_columns=["B", "C", "string", "string2", "datetime"]
)
result = store.select("df_dc", "B>0")
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected, check_index_type=False)
result = store.select("df_dc", ["B > 0", "C > 0", "string == foo"])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(setup_path) as store:
# doc example part 2
np.random.seed(1234)
index = date_range("1/1/2000", periods=8)
df_dc = DataFrame(
np.random.randn(8, 3), index=index, columns=["A", "B", "C"]
)
df_dc["string"] = "foo"
df_dc.loc[4:6, "string"] = np.nan
df_dc.loc[7:9, "string"] = "bar"
df_dc.loc[:, ["B", "C"]] = df_dc.loc[:, ["B", "C"]].abs()
df_dc["string2"] = "cool"
# on-disk operations
store.append("df_dc", df_dc, data_columns=["B", "C", "string", "string2"])
result = store.select("df_dc", "B>0")
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected)
result = store.select("df_dc", ["B > 0", "C > 0", 'string == "foo"'])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
tm.assert_frame_equal(result, expected)
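# --- Illustrative sketch (not part of the original test suite) ---
# data_columns make individual columns queryable via where-expressions, as in
# the doc-style selections above. Column names and the threshold are
# arbitrary examples.
def _sketch_data_column_queries():
    import os
    import tempfile
    import numpy as np
    import pandas as pd

    path = os.path.join(tempfile.mkdtemp(), "sketch_data_columns.h5")
    df = pd.DataFrame(
        {"A": np.random.randn(10), "string": ["foo"] * 5 + ["bar"] * 5}
    )
    with pd.HDFStore(path, mode="w") as store:
        store.append("df", df, data_columns=["A", "string"])
        result = store.select("df", "A > 0 and string == 'bar'")
        expected = df[(df.A > 0) & (df.string == "bar")]
        assert len(result) == len(expected)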
def test_create_table_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def col(t, column):
return getattr(store.get_storer(t).table.cols, column)
# data columns
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df["string2"] = "bar"
store.append("f", df, data_columns=["string", "string2"])
assert col("f", "index").is_indexed is True
assert col("f", "string").is_indexed is True
assert col("f", "string2").is_indexed is True
# specify index=columns
store.append(
"f2", df, index=["string"], data_columns=["string", "string2"]
)
assert col("f2", "index").is_indexed is False
assert col("f2", "string").is_indexed is True
assert col("f2", "string2").is_indexed is False
# try to index a non-table
_maybe_remove(store, "f2")
store.put("f2", df)
with pytest.raises(TypeError):
store.create_table_index("f2")
def test_append_hierarchical(self, setup_path):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo", "bar"],
)
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.append("mi", df)
result = store.select("mi")
tm.assert_frame_equal(result, df)
# GH 3748
result = store.select("mi", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
with ensure_clean_path("test.hdf") as path:
df.to_hdf(path, "df", format="table")
result = read_hdf(path, "df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_column_multiindex(self, setup_path):
# GH 4710
# recreate multi-indexes properly
index = MultiIndex.from_tuples(
[("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")], names=["first", "second"]
)
df = DataFrame(np.arange(12).reshape(3, 4), columns=index)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(setup_path) as store:
store.put("df", df)
tm.assert_frame_equal(
store["df"], expected, check_index_type=True, check_column_type=True
)
store.put("df1", df, format="table")
tm.assert_frame_equal(
store["df1"], expected, check_index_type=True, check_column_type=True
)
with pytest.raises(ValueError):
store.put("df2", df, format="table", data_columns=["A"])
with pytest.raises(ValueError):
store.put("df3", df, format="table", data_columns=True)
# appending multi-column on existing table (see GH 6167)
with ensure_clean_store(setup_path) as store:
store.append("df2", df)
store.append("df2", df)
tm.assert_frame_equal(store["df2"], concat((df, df)))
# non_index_axes name
df = DataFrame(
np.arange(12).reshape(3, 4), columns=Index(list("ABCD"), name="foo")
)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(setup_path) as store:
store.put("df1", df, format="table")
tm.assert_frame_equal(
store["df1"], expected, check_index_type=True, check_column_type=True
)
def test_store_multiindex(self, setup_path):
# validate multi-index names
# GH 5527
with ensure_clean_store(setup_path) as store:
def make_index(names=None):
return MultiIndex.from_tuples(
[
(datetime.datetime(2013, 12, d), s, t)
for d in range(1, 3)
for s in range(2)
for t in range(3)
],
names=names,
)
# no names
_maybe_remove(store, "df")
df = DataFrame(np.zeros((12, 2)), columns=["a", "b"], index=make_index())
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
# partial names
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", None, None]),
)
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
# series
_maybe_remove(store, "s")
s = Series(np.zeros(12), index=make_index(["date", None, None]))
store.append("s", s)
xp = Series(np.zeros(12), index=make_index(["date", "level_1", "level_2"]))
tm.assert_series_equal(store.select("s"), xp)
# dup with column
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "a", "t"]),
)
with pytest.raises(ValueError):
store.append("df", df)
# dup within level
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "date", "date"]),
)
with pytest.raises(ValueError):
store.append("df", df)
# fully named
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "s", "t"]),
)
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
def test_select_columns_in_where(self, setup_path):
# GH 6169
# recreate multi-indexes when columns is passed
# in the `where` argument
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo_name", "bar_name"],
)
# With a DataFrame
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
expected = df[["A"]]
tm.assert_frame_equal(store.select("df", columns=["A"]), expected)
tm.assert_frame_equal(store.select("df", where="columns=['A']"), expected)
# With a Series
s = Series(np.random.randn(10), index=index, name="A")
with ensure_clean_store(setup_path) as store:
store.put("s", s, format="table")
tm.assert_series_equal(store.select("s", where="columns=['A']"), s)
def test_mi_data_columns(self, setup_path):
# GH 14435
idx = pd.MultiIndex.from_arrays(
[date_range("2000-01-01", periods=5), range(5)], names=["date", "id"]
)
df = pd.DataFrame({"a": [1.1, 1.2, 1.3, 1.4, 1.5]}, index=idx)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=True)
actual = store.select("df", where="id == 1")
expected = df.iloc[[1], :]
tm.assert_frame_equal(actual, expected)
def test_pass_spec_to_storer(self, setup_path):
df = tm.makeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("df", df)
with pytest.raises(TypeError):
store.select("df", columns=["A"])
with pytest.raises(TypeError):
store.select("df", where=[("columns=A")])
@td.xfail_non_writeable
def test_append_misc(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df, chunksize=1)
result = store.select("df")
tm.assert_frame_equal(result, df)
store.append("df1", df, expectedrows=10)
result = store.select("df1")
tm.assert_frame_equal(result, df)
# more chunksize in append tests
def check(obj, comparator):
for c in [10, 200, 1000]:
with ensure_clean_store(setup_path, mode="w") as store:
store.append("obj", obj, chunksize=c)
result = store.select("obj")
comparator(result, obj)
df = tm.makeDataFrame()
df["string"] = "foo"
df["float322"] = 1.0
df["float322"] = df["float322"].astype("float32")
df["bool"] = df["float322"] > 0
df["time1"] = Timestamp("20130101")
df["time2"] = Timestamp("20130102")
check(df, tm.assert_frame_equal)
# empty frame, GH4273
with ensure_clean_store(setup_path) as store:
# 0 len
df_empty = DataFrame(columns=list("ABC"))
store.append("df", df_empty)
with pytest.raises(KeyError, match="'No object named df in the file'"):
store.select("df")
# repeated append of 0/non-zero frames
df = DataFrame(np.random.rand(10, 3), columns=list("ABC"))
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
store.append("df", df_empty)
tm.assert_frame_equal(store.select("df"), df)
# store
df = DataFrame(columns=list("ABC"))
store.put("df2", df)
tm.assert_frame_equal(store.select("df2"), df)
def test_append_raise(self, setup_path):
with ensure_clean_store(setup_path) as store:
# test append with invalid input to get good error messages
# list in column
df = tm.makeDataFrame()
df["invalid"] = [["a"]] * len(df)
assert df.dtypes["invalid"] == np.object_
with pytest.raises(TypeError):
store.append("df", df)
# multiple invalid columns
df["invalid2"] = [["a"]] * len(df)
df["invalid3"] = [["a"]] * len(df)
with pytest.raises(TypeError):
store.append("df", df)
# datetime with embedded nans as object
df = tm.makeDataFrame()
s = Series(datetime.datetime(2001, 1, 2), index=df.index)
s = s.astype(object)
s[0:5] = np.nan
df["invalid"] = s
assert df.dtypes["invalid"] == np.object_
with pytest.raises(TypeError):
store.append("df", df)
# directly ndarray
with pytest.raises(TypeError):
store.append("df", np.arange(10))
# series directly
with pytest.raises(TypeError):
store.append("df", Series(np.arange(10)))
# appending an incompatible table
df = tm.makeDataFrame()
store.append("df", df)
df["foo"] = "foo"
with pytest.raises(ValueError):
store.append("df", df)
def test_table_index_incompatible_dtypes(self, setup_path):
df1 = DataFrame({"a": [1, 2, 3]})
df2 = DataFrame({"a": [4, 5, 6]}, index=date_range("1/1/2000", periods=3))
with ensure_clean_store(setup_path) as store:
store.put("frame", df1, format="table")
with pytest.raises(TypeError):
store.put("frame", df2, format="table", append=True)
def test_table_values_dtypes_roundtrip(self, setup_path):
with ensure_clean_store(setup_path) as store:
df1 = DataFrame({"a": [1, 2, 3]}, dtype="f8")
store.append("df_f8", df1)
tm.assert_series_equal(df1.dtypes, store["df_f8"].dtypes)
df2 = DataFrame({"a": [1, 2, 3]}, dtype="i8")
store.append("df_i8", df2)
tm.assert_series_equal(df2.dtypes, store["df_i8"].dtypes)
# incompatible dtype
with pytest.raises(ValueError):
store.append("df_i8", df1)
# check creation/storage/retrieval of float32 (a bit hacky to
# actually create them though)
df1 = DataFrame(np.array([[1], [2], [3]], dtype="f4"), columns=["A"])
store.append("df_f4", df1)
tm.assert_series_equal(df1.dtypes, store["df_f4"].dtypes)
assert df1.dtypes[0] == "float32"
# check with mixed dtypes
df1 = DataFrame(
{
c: Series(np.random.randint(5), dtype=c)
for c in ["float32", "float64", "int32", "int64", "int16", "int8"]
}
)
df1["string"] = "foo"
df1["float322"] = 1.0
df1["float322"] = df1["float322"].astype("float32")
df1["bool"] = df1["float32"] > 0
df1["time1"] = Timestamp("20130101")
df1["time2"] = Timestamp("20130102")
store.append("df_mixed_dtypes1", df1)
result = store.select("df_mixed_dtypes1").dtypes.value_counts()
result.index = [str(i) for i in result.index]
expected = Series(
{
"float32": 2,
"float64": 1,
"int32": 1,
"bool": 1,
"int16": 1,
"int8": 1,
"int64": 1,
"object": 1,
"datetime64[ns]": 2,
}
)
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
def test_table_mixed_dtypes(self, setup_path):
# frame
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
store.append("df1_mixed", df)
tm.assert_frame_equal(store.select("df1_mixed"), df)
def test_unimplemented_dtypes_table_columns(self, setup_path):
with ensure_clean_store(setup_path) as store:
dtypes = [("date", datetime.date(2001, 1, 2))]
# currently not supported dtypes ####
for n, f in dtypes:
df = tm.makeDataFrame()
df[n] = f
with pytest.raises(TypeError):
store.append("df1_{n}".format(n=n), df)
# frame
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["datetime1"] = datetime.date(2001, 1, 2)
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
# this fails because we have a date in the object block
with pytest.raises(TypeError):
store.append("df_unimplemented", df)
@td.xfail_non_writeable
@pytest.mark.skipif(
LooseVersion(np.__version__) == LooseVersion("1.15.0"),
reason=(
"Skipping pytables test when numpy version is "
"exactly equal to 1.15.0: gh-22098"
),
)
def test_calendar_roundtrip_issue(self, setup_path):
# 8591
# doc example from tseries holiday section
weekmask_egypt = "Sun Mon Tue Wed Thu"
holidays = [
"2012-05-01",
datetime.datetime(2013, 5, 1),
np.datetime64("2014-05-01"),
]
bday_egypt = pd.offsets.CustomBusinessDay(
holidays=holidays, weekmask=weekmask_egypt
)
dt = datetime.datetime(2013, 4, 30)
dts = date_range(dt, periods=5, freq=bday_egypt)
s = Series(dts.weekday, dts).map(Series("Mon Tue Wed Thu Fri Sat Sun".split()))
with ensure_clean_store(setup_path) as store:
store.put("fixed", s)
result = store.select("fixed")
tm.assert_series_equal(result, s)
store.append("table", s)
result = store.select("table")
tm.assert_series_equal(result, s)
def test_roundtrip_tz_aware_index(self, setup_path):
# GH 17618
time = pd.Timestamp("2000-01-01 01:00:00", tz="US/Eastern")
df = pd.DataFrame(data=[0], index=[time])
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="fixed")
recons = store["frame"]
tm.assert_frame_equal(recons, df)
assert recons.index[0].value == 946706400000000000
def test_append_with_timedelta(self, setup_path):
# GH 3577
# append timedelta
df = DataFrame(
dict(
A=Timestamp("20130101"),
B=[
Timestamp("20130101") + timedelta(days=i, seconds=10)
for i in range(10)
],
)
)
df["C"] = df["A"] - df["B"]
df.loc[3:5, "C"] = np.nan
with ensure_clean_store(setup_path) as store:
# table
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df")
tm.assert_frame_equal(result, df)
result = store.select("df", where="C<100000")
tm.assert_frame_equal(result, df)
result = store.select("df", where="C<pd.Timedelta('-3D')")
tm.assert_frame_equal(result, df.iloc[3:])
result = store.select("df", "C<'-3D'")
tm.assert_frame_equal(result, df.iloc[3:])
# a bit hacky here as we don't really deal with the NaT properly
result = store.select("df", "C<'-500000s'")
result = result.dropna(subset=["C"])
tm.assert_frame_equal(result, df.iloc[6:])
result = store.select("df", "C<'-3.5D'")
result = result.iloc[1:]
tm.assert_frame_equal(result, df.iloc[4:])
# fixed
_maybe_remove(store, "df2")
store.put("df2", df)
result = store.select("df2")
tm.assert_frame_equal(result, df)
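# --- Illustrative sketch (not part of the original test suite) ---
# Timedelta data columns can be filtered with offset-style strings, mirroring
# the "C<'-3D'" selections above. The key and the cut-off are arbitrary.
def _sketch_timedelta_query():
    import os
    import tempfile
    import pandas as pd

    path = os.path.join(tempfile.mkdtemp(), "sketch_timedelta.h5")
    df = pd.DataFrame(
        {
            "A": pd.Timestamp("20130101"),
            "B": [pd.Timestamp("20130101") + pd.Timedelta(days=i) for i in range(5)],
        }
    )
    df["C"] = df["A"] - df["B"]  # non-positive timedeltas
    with pd.HDFStore(path, mode="w") as store:
        store.append("df", df, data_columns=True)
        result = store.select("df", "C<'-2D'")
        assert len(result) == len(df[df.C < pd.Timedelta(days=-2)])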
def test_remove(self, setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeDataFrame()
store["a"] = ts
store["b"] = df
_maybe_remove(store, "a")
assert len(store) == 1
tm.assert_frame_equal(df, store["b"])
_maybe_remove(store, "b")
assert len(store) == 0
# nonexistence
with pytest.raises(
KeyError, match="'No object named a_nonexistent_store in the file'"
):
store.remove("a_nonexistent_store")
# pathing
store["a"] = ts
store["b/foo"] = df
_maybe_remove(store, "foo")
_maybe_remove(store, "b/foo")
assert len(store) == 1
store["a"] = ts
store["b/foo"] = df
_maybe_remove(store, "b")
assert len(store) == 1
# __delitem__
store["a"] = ts
store["b"] = df
del store["a"]
del store["b"]
assert len(store) == 0
def test_invalid_terms(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[0:4, "string"] = "bar"
store.put("df", df, format="table")
# some invalid terms
with pytest.raises(TypeError):
Term()
# more invalid
with pytest.raises(ValueError):
store.select("df", "df.index[3]")
with pytest.raises(SyntaxError):
store.select("df", "index>")
# from the docs
with ensure_clean_path(setup_path) as path:
dfq = DataFrame(
np.random.randn(10, 4),
columns=list("ABCD"),
index=date_range("20130101", periods=10),
)
dfq.to_hdf(path, "dfq", format="table", data_columns=True)
# check ok
read_hdf(
path, "dfq", where="index>Timestamp('20130104') & columns=['A', 'B']"
)
read_hdf(path, "dfq", where="A>0 or C>0")
# catch the invalid reference
with ensure_clean_path(setup_path) as path:
dfq = DataFrame(
np.random.randn(10, 4),
columns=list("ABCD"),
index=date_range("20130101", periods=10),
)
dfq.to_hdf(path, "dfq", format="table")
with pytest.raises(ValueError):
read_hdf(path, "dfq", where="A>0 or C>0")
def test_same_name_scoping(self, setup_path):
with ensure_clean_store(setup_path) as store:
import pandas as pd
df = DataFrame(
np.random.randn(20, 2), index=pd.date_range("20130101", periods=20)
)
store.put("df", df, format="table")
expected = df[df.index > pd.Timestamp("20130105")]
import datetime # noqa
result = store.select("df", "index>datetime.datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
from datetime import datetime # noqa
# technically an error, but allow it
result = store.select("df", "index>datetime.datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
result = store.select("df", "index>datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
def test_series(self, setup_path):
s = tm.makeStringSeries()
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
ts = tm.makeTimeSeries()
self._check_roundtrip(ts, tm.assert_series_equal, path=setup_path)
ts2 = Series(ts.index, Index(ts.index, dtype=object))
self._check_roundtrip(ts2, tm.assert_series_equal, path=setup_path)
ts3 = Series(ts.values, Index(np.asarray(ts.index, dtype=object), dtype=object))
self._check_roundtrip(
ts3, tm.assert_series_equal, path=setup_path, check_index_type=False
)
def test_float_index(self, setup_path):
# GH #454
index = np.random.randn(10)
s = Series(np.random.randn(10), index=index)
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
@td.xfail_non_writeable
def test_tuple_index(self, setup_path):
# GH #492
col = np.arange(10)
idx = [(0.0, 1.0), (2.0, 3.0), (4.0, 5.0)]
data = np.random.randn(30).reshape((3, 10))
DF = DataFrame(data, index=idx, columns=col)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
self._check_roundtrip(DF, tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
@pytest.mark.filterwarnings("ignore::pandas.errors.PerformanceWarning")
def test_index_types(self, setup_path):
with catch_warnings(record=True):
values = np.random.randn(2)
func = lambda l, r: tm.assert_series_equal(
l, r, check_dtype=True, check_index_type=True, check_series_type=True
)
with catch_warnings(record=True):
ser = Series(values, [0, "y"])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [datetime.datetime.today(), 0])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, ["y", 0])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [datetime.date.today(), "a"])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [0, "y"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [datetime.datetime.today(), 0])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, ["y", 0])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [datetime.date.today(), "a"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1.23, "b"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1, 1.53])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1, 5])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(
values, [datetime.datetime(2012, 1, 1), datetime.datetime(2012, 1, 2)]
)
self._check_roundtrip(ser, func, path=setup_path)
def test_timeseries_preepoch(self, setup_path):
dr = bdate_range("1/1/1940", "1/1/1960")
ts = Series(np.random.randn(len(dr)), index=dr)
try:
self._check_roundtrip(ts, tm.assert_series_equal, path=setup_path)
except OverflowError:
pytest.skip("known failer on some windows platforms")
@td.xfail_non_writeable
@pytest.mark.parametrize(
"compression", [False, pytest.param(True, marks=td.skip_if_windows_python_3)]
)
def test_frame(self, compression, setup_path):
df = tm.makeDataFrame()
# put in some random NAs
df.values[0, 0] = np.nan
df.values[5, 3] = np.nan
self._check_roundtrip_table(
df, tm.assert_frame_equal, path=setup_path, compression=compression
)
self._check_roundtrip(
df, tm.assert_frame_equal, path=setup_path, compression=compression
)
tdf = tm.makeTimeDataFrame()
self._check_roundtrip(
tdf, tm.assert_frame_equal, path=setup_path, compression=compression
)
with ensure_clean_store(setup_path) as store:
# not consolidated
df["foo"] = np.random.randn(len(df))
store["df"] = df
recons = store["df"]
assert recons._data.is_consolidated()
# empty
self._check_roundtrip(df[:0], tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
def test_empty_series_frame(self, setup_path):
s0 = Series(dtype=object)
s1 = Series(name="myseries", dtype=object)
df0 = DataFrame()
df1 = DataFrame(index=["a", "b", "c"])
df2 = DataFrame(columns=["d", "e", "f"])
self._check_roundtrip(s0, tm.assert_series_equal, path=setup_path)
self._check_roundtrip(s1, tm.assert_series_equal, path=setup_path)
self._check_roundtrip(df0, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df1, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df2, tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
@pytest.mark.parametrize(
"dtype", [np.int64, np.float64, np.object, "m8[ns]", "M8[ns]"]
)
def test_empty_series(self, dtype, setup_path):
s = Series(dtype=dtype)
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
def test_can_serialize_dates(self, setup_path):
rng = [x.date() for x in bdate_range("1/1/2000", "1/30/2000")]
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
self._check_roundtrip(frame, tm.assert_frame_equal, path=setup_path)
def test_store_hierarchical(self, setup_path):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo", "bar"],
)
frame = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
self._check_roundtrip(frame, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(frame.T, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(frame["A"], tm.assert_series_equal, path=setup_path)
# check that the names are stored
with ensure_clean_store(setup_path) as store:
store["frame"] = frame
recons = store["frame"]
tm.assert_frame_equal(recons, frame)
def test_store_index_name(self, setup_path):
df = tm.makeDataFrame()
df.index.name = "foo"
with ensure_clean_store(setup_path) as store:
store["frame"] = df
recons = store["frame"]
tm.assert_frame_equal(recons, df)
def test_store_index_name_with_tz(self, setup_path):
# GH 13884
df = pd.DataFrame({"A": [1, 2]})
df.index = pd.DatetimeIndex([1234567890123456787, 1234567890123456788])
df.index = df.index.tz_localize("UTC")
df.index.name = "foo"
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="table")
recons = store["frame"]
tm.assert_frame_equal(recons, df)
@pytest.mark.parametrize("table_format", ["table", "fixed"])
def test_store_index_name_numpy_str(self, table_format, setup_path):
# GH #13492
idx = pd.Index(
pd.to_datetime([datetime.date(2000, 1, 1), datetime.date(2000, 1, 2)]),
name="cols\u05d2",
)
idx1 = pd.Index(
pd.to_datetime([datetime.date(2010, 1, 1), datetime.date(2010, 1, 2)]),
name="rows\u05d0",
)
df = pd.DataFrame(np.arange(4).reshape(2, 2), columns=idx, index=idx1)
# This used to fail, returning numpy strings instead of python strings.
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", format=table_format)
df2 = read_hdf(path, "df")
tm.assert_frame_equal(df, df2, check_names=True)
assert type(df2.index.name) == str
assert type(df2.columns.name) == str
def test_store_series_name(self, setup_path):
df = tm.makeDataFrame()
series = df["A"]
with ensure_clean_store(setup_path) as store:
store["series"] = series
recons = store["series"]
tm.assert_series_equal(recons, series)
@td.xfail_non_writeable
@pytest.mark.parametrize(
"compression", [False, pytest.param(True, marks=td.skip_if_windows_python_3)]
)
def test_store_mixed(self, compression, setup_path):
def _make_one():
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["int1"] = 1
df["int2"] = 2
return df._consolidate()
df1 = _make_one()
df2 = _make_one()
self._check_roundtrip(df1, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df2, tm.assert_frame_equal, path=setup_path)
with ensure_clean_store(setup_path) as store:
store["obj"] = df1
tm.assert_frame_equal(store["obj"], df1)
store["obj"] = df2
tm.assert_frame_equal(store["obj"], df2)
        # check that we can store Series of all of these types
self._check_roundtrip(
df1["obj1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
self._check_roundtrip(
df1["bool1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
self._check_roundtrip(
df1["int1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
@pytest.mark.filterwarnings(
"ignore:\\nduplicate:pandas.io.pytables.DuplicateWarning"
)
def test_select_with_dups(self, setup_path):
# single dtypes
df = DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"])
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=["A"])
expected = df.loc[:, ["A"]]
tm.assert_frame_equal(result, expected)
# dups across dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["A"]]
result = store.select("df", columns=["A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["B", "A"]]
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
# duplicates on both index and columns
with ensure_clean_store(setup_path) as store:
store.append("df", df)
store.append("df", df)
expected = df.loc[:, ["B", "A"]]
expected = concat([expected, expected])
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
def test_overwrite_node(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeDataFrame()
ts = tm.makeTimeSeries()
store["a"] = ts
tm.assert_series_equal(store["a"], ts)
def test_select(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
# select with columns=
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df")
store.append("df", df)
result = store.select("df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# equivalently
result = store.select("df", [("columns=['A', 'B']")])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
                # all columns as data columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column, but different columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["C", "D"])
expected = df[df.A > 0].reindex(columns=["C", "D"])
tm.assert_frame_equal(expected, result)
def test_select_dtypes(self, setup_path):
with ensure_clean_store(setup_path) as store:
# with a Timestamp data column (GH #2637)
df = DataFrame(
dict(ts=bdate_range("2012-01-01", periods=300), A=np.random.randn(300))
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A"])
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# bool columns (GH #2849)
df = DataFrame(np.random.randn(5, 2), columns=["A", "B"])
df["object"] = "foo"
df.loc[4:5, "object"] = "bar"
df["boolv"] = df["A"] > 0
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
expected = df[df.boolv == True].reindex(columns=["A", "boolv"]) # noqa
for v in [True, "true", 1]:
result = store.select(
"df", "boolv == {v!s}".format(v=v), columns=["A", "boolv"]
)
tm.assert_frame_equal(expected, result)
expected = df[df.boolv == False].reindex(columns=["A", "boolv"]) # noqa
for v in [False, "false", 0]:
result = store.select(
"df", "boolv == {v!s}".format(v=v), columns=["A", "boolv"]
)
tm.assert_frame_equal(expected, result)
# integer index
df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20)))
_maybe_remove(store, "df_int")
store.append("df_int", df)
result = store.select("df_int", "index<10 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
# float index
df = DataFrame(
dict(
A=np.random.rand(20),
B=np.random.rand(20),
index=np.arange(20, dtype="f8"),
)
)
_maybe_remove(store, "df_float")
store.append("df_float", df)
result = store.select("df_float", "index<10.0 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
with ensure_clean_store(setup_path) as store:
# floats w/o NaN
df = DataFrame(dict(cols=range(11), values=range(11)), dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
store.append("df1", df, data_columns=True)
result = store.select("df1", where="values>2.0")
expected = df[df["values"] > 2.0]
tm.assert_frame_equal(expected, result)
# floats with NaN
df.iloc[0] = np.nan
expected = df[df["values"] > 2.0]
store.append("df2", df, data_columns=True, index=False)
result = store.select("df2", where="values>2.0")
tm.assert_frame_equal(expected, result)
# https://github.com/PyTables/PyTables/issues/282
# bug in selection when 0th row has a np.nan and an index
# store.append('df3',df,data_columns=True)
# result = store.select(
# 'df3', where='values>2.0')
# tm.assert_frame_equal(expected, result)
            # NaN not in the first position: float with NaN is ok too
df = DataFrame(dict(cols=range(11), values=range(11)), dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
df.iloc[1] = np.nan
expected = df[df["values"] > 2.0]
store.append("df4", df, data_columns=True)
result = store.select("df4", where="values>2.0")
tm.assert_frame_equal(expected, result)
# test selection with comparison against numpy scalar
# GH 11283
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
expected = df[df["A"] > 0]
store.append("df", df, data_columns=True)
np_zero = np.float64(0) # noqa
result = store.select("df", where=["A>np_zero"])
tm.assert_frame_equal(expected, result)
def test_select_with_many_inputs(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
dict(
ts=bdate_range("2012-01-01", periods=300),
A=np.random.randn(300),
B=range(300),
users=["a"] * 50
+ ["b"] * 50
+ ["c"] * 100
+ ["a{i:03d}".format(i=i) for i in range(100)],
)
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A", "B", "users"])
# regular select
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
            tm.assert_frame_equal(expected, result)
#### Master Script 10b: Perform interrepeat hyperparameter configuration dropout on deep learning all-predictor-based models (APM) ####
#
# <NAME>
# University of Cambridge
# email address: <EMAIL>
#
### Contents:
# I. Initialisation
# II. Calculate ORC of extant validation predictions
# III. Prepare bootstrapping resamples for configuration dropout
# IV. Dropout configurations that are consistently (a = .05) inferior in performance
# V. Compile and save validation and testing set predictions across partitions
### I. Initialisation
# Fundamental libraries
import os
import re
import sys
import time
import glob
import random
import datetime
import warnings
import itertools
import numpy as np
import pandas as pd
import pickle as cp
import seaborn as sns
import multiprocessing
from scipy import stats
from pathlib import Path
from shutil import rmtree
from ast import literal_eval
import matplotlib.pyplot as plt
from collections import Counter
from argparse import ArgumentParser
os.environ['CUDA_LAUNCH_BLOCKING'] = "1"
warnings.filterwarnings(action="ignore")
# PyTorch, PyTorch.Text, and Lightning-PyTorch methods
import torch
from torch import nn, optim, Tensor
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from torchtext.vocab import Vocab
import pytorch_lightning as pl
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
# SciKit-Learn methods
from sklearn.metrics import confusion_matrix, accuracy_score, roc_auc_score
from sklearn.preprocessing import LabelEncoder, KBinsDiscretizer, OneHotEncoder, StandardScaler
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.utils import resample
from sklearn.utils.class_weight import compute_class_weight
# TQDM for progress tracking
from tqdm import tqdm
# Custom methods
from functions.model_building import calc_orc
# Set the last repeat completed
REPEAT = 20
# Set version code
VERSION = 'DEEP_v1-0'
# Define model output directory based on version code
model_dir = '/home/sb2406/rds/hpc-work/APM_outputs/'+VERSION
# Load cross-validation information to get GUPI and outcomes
cv_splits = pd.read_csv('../cross_validation_splits.csv')
study_GUPI_GOSE = cv_splits[['GUPI','GOSE']].drop_duplicates()
# Define repeat directory based on last completed repeat
repeat_dir = os.path.join(model_dir,'repeat'+str(REPEAT).zfill(int(np.log10(cv_splits.repeat.max()))+1))
# Set number of cores for all parallel processing
NUM_CORES = multiprocessing.cpu_count()
# Set number of resamples for bootstrapping
NUM_RESAMP = 1000
# Load tuning grid based on the last completed repeat
if REPEAT == 1:
tuning_grid = pd.read_csv(os.path.join(model_dir,'APM_deep_tuning_grid.csv'))
tuning_grid['TUNE_IDX'] = tuning_grid['TUNE_IDX'].astype(str).str.zfill(4)
tuning_grid['NEURONS'] = tuning_grid['NEURONS'].apply(eval)
else:
tuning_grid = pd.read_csv(os.path.join(model_dir,'APM_post_repeat_'+str(REPEAT).zfill(2)+'_deep_tuning_grid.csv'))
tuning_grid['TUNE_IDX'] = tuning_grid['TUNE_IDX'].astype(str).str.zfill(4)
tuning_grid['NEURONS'] = tuning_grid['NEURONS'].apply(eval)
### II. Calculate ORC of extant validation predictions
# Perform validation prediction file search
val_pred_files = []
for path in Path(model_dir).rglob('*/val_predictions.csv'):
val_pred_files.append(str(path.resolve()))
# Characterise validation prediction file information
val_pred_file_info_df = pd.DataFrame({'file':val_pred_files,
'TUNE_IDX':[re.search('tune_(.*)/', curr_file).group(1) for curr_file in val_pred_files],
'VERSION':[re.search('APM_outputs/(.*)/repeat', curr_file).group(1) for curr_file in val_pred_files],
'repeat':[int(re.search('/repeat(.*)/fold', curr_file).group(1)) for curr_file in val_pred_files],
'fold':[int(re.search('/fold(.*)/tune_', curr_file).group(1)) for curr_file in val_pred_files]
}).sort_values(by=['repeat','fold','TUNE_IDX','VERSION']).reset_index(drop=True)
# Merge output activation information to validation prediction info dataframe
val_pred_file_info_df = pd.merge(val_pred_file_info_df,tuning_grid[['TUNE_IDX','OUTPUT_ACTIVATION']],how='left',on='TUNE_IDX').reset_index(drop=True)
# Partition validation files across number of available cores for parallel processing
npc = [val_pred_file_info_df.shape[0] // NUM_CORES for _ in range(NUM_CORES)]
npc[:(val_pred_file_info_df.shape[0] - sum(npc))] = [val+1 for val in npc[:(val_pred_file_info_df.shape[0] - sum(npc))]]
end_indices = np.cumsum(npc)
start_indices = np.insert(end_indices[:-1],0,0)
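# Worked illustration (hypothetical numbers): with 10 prediction files and 4 cores, `npc`
# starts as [2, 2, 2, 2]; the remaining 10 - 8 = 2 files are added to the first two chunks,
# giving [3, 3, 2, 2], so the (start, end) index pairs become (0, 3), (3, 6), (6, 8), (8, 10).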
# Build arguments for parallelisation function
arg_iterable = [(val_pred_file_info_df.iloc[start_indices[idx]:end_indices[idx]].reset_index(drop=True),True,'Calculating validation set ORC') for idx in range(len(start_indices))]
# Calculate ORC of each validation prediction file in parallel
with multiprocessing.Pool(NUM_CORES) as pool:
compiled_val_ORC = pd.concat(pool.starmap(calc_orc, arg_iterable),ignore_index = True)
# Save validation prediction ORC in the repeat directory
compiled_val_ORC.to_csv(os.path.join(repeat_dir,'validation_performance.csv'),index=False)
# Group by tuning index and average validation ORC
across_cv_perf = compiled_val_ORC.groupby(['TUNE_IDX','OUTPUT_ACTIVATION'],as_index=False)['val_ORC'].mean()
# Determine 'optimal' tuning indices based on validation performance
opt_tune_idx = across_cv_perf[across_cv_perf.groupby('OUTPUT_ACTIVATION')['val_ORC'].transform(max) == across_cv_perf['val_ORC']].reset_index(drop=True)
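# The groupby-transform(max) filter above keeps, for each OUTPUT_ACTIVATION, the row(s) whose
# mean validation ORC equals that group's maximum. Illustrative values only: if softmax
# configurations 0012 and 0048 average 0.71 and 0.74, only 0048 is retained as the 'optimal'
# softmax tuning index used as the reference in the bootstrap dropout test below.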
### III. Prepare bootstrapping resamples for configuration dropout
# Create directory for storing dropout bootstrapping information
dropout_dir = os.path.join('/home/sb2406/rds/hpc-work/interrepeat_dropout','APM_deep',VERSION)
os.makedirs(dropout_dir,exist_ok=True)
# Create stratified resamples for bootstrapping
bs_rs_GUPIs = [resample(study_GUPI_GOSE.GUPI.values,replace=True,n_samples=study_GUPI_GOSE.shape[0],stratify=study_GUPI_GOSE.GOSE.values) for _ in range(NUM_RESAMP)]
bs_rs_GUPIs = [np.unique(curr_rs) for curr_rs in bs_rs_GUPIs]
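# Each resample draws GUPIs with replacement while preserving the GOSE outcome proportions
# (stratify=GOSE); np.unique then reduces each draw to its distinct GUPIs, so every resample
# acts as a stratified patient subset rather than a multiset with repeated patients.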
# Create Data Frame to store bootstrapping resamples
bs_resamples = pd.DataFrame({'RESAMPLE_IDX':[i+1 for i in range(NUM_RESAMP)],'GUPIs':bs_rs_GUPIs})
# Create Data Frame of output activation - resample combos
output_resample_combos = pd.DataFrame(list(itertools.product(compiled_val_ORC.OUTPUT_ACTIVATION.unique(), bs_resamples.RESAMPLE_IDX.unique())), columns=['OUTPUT_ACTIVATION', 'RESAMPLE_IDX'])
# Merge the two dataframes
bs_resamples = pd.merge(bs_resamples,output_resample_combos,how='outer',on='RESAMPLE_IDX')
# Append information of optimal tuning index
bs_resamples = pd.merge(bs_resamples,opt_tune_idx[['TUNE_IDX','OUTPUT_ACTIVATION']],how='left',on='OUTPUT_ACTIVATION')
bs_resamples = bs_resamples.rename(columns={'TUNE_IDX':'OPT_TUNE_IDX'})
# Save current resample information for parallelised hypothesis testing
bs_resamples.to_pickle(os.path.join(dropout_dir,'post_repeat_'+str(REPEAT).zfill(2)+'_resamples.pkl'))
# NOTE: at this point, run the scripts for 6c
### IV. Dropout configurations that are consistently (a = .05) inferior in performance
# Find all bootstrapped ORC results
bs_orc_files = []
for path in Path(os.path.join(dropout_dir,'repeat'+str(REPEAT).zfill(2))).rglob('*.pkl'):
curr_path = str(path.resolve())
if ('softmax_dropout' in curr_path) | ('sigmoid_dropout' in curr_path):
bs_orc_files.append(curr_path)
# Characterise file information
bs_orc_info_df = pd.DataFrame({'file':bs_orc_files,
'REPEAT':[int(re.search('/repeat(.*)/', curr_file).group(1)) for curr_file in bs_orc_files],
'OUTPUT_ACTIVATION':[re.search('/repeat(.*)_dropout', curr_file).group(1) for curr_file in bs_orc_files],
'RESAMPLE_IDX':[int(re.search('resample_idx_(.*).pkl', curr_file).group(1)) for curr_file in bs_orc_files]}).sort_values(by=['OUTPUT_ACTIVATION','RESAMPLE_IDX']).reset_index(drop=True)
bs_orc_info_df['OUTPUT_ACTIVATION'] = bs_orc_info_df['OUTPUT_ACTIVATION'].str[3:]
# Initialise an empty list to store dropped out tuning configurations
dropped_tis = []
# Iterate through output activation options
for curr_OUTPUT_ACTIVATION in bs_orc_info_df.OUTPUT_ACTIVATION.unique():
# Load and compile all files within current output activation
curr_output_bs = pd.concat([pd.read_pickle(f) for f in bs_orc_info_df.file[bs_orc_info_df.OUTPUT_ACTIVATION == curr_OUTPUT_ACTIVATION]],ignore_index=True)
# Calculate p-value for each tuning index
p_val_df = curr_output_bs.groupby('TUNE_IDX',as_index=False)['trial_win'].apply(lambda x: x.sum()/len(x))
p_val_df = p_val_df.rename(columns={'trial_win':'p_val'})
# Find significantly poor configurations for dropout
sig_df = p_val_df[p_val_df.p_val <= 0.05].reset_index(drop=True)
# Add dropped tuning indices to list
dropped_tis += sig_df.TUNE_IDX.to_list()
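# Illustrative example of the dropout rule (hypothetical counts): if a configuration records
# 'trial_win' == 1 in only 30 of the 1000 bootstrap resamples against the optimal
# configuration, its p-value is 30/1000 = 0.03 <= 0.05 and its tuning index is added to
# `dropped_tis`, removing it from the viable tuning grid below.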
# Print out how many configurations have been dropped
print(str(len(dropped_tis))+' tuning indices out of '+str(tuning_grid.shape[0])+' dropped after repeat '+str(REPEAT))
# Update viable tuning grid and save
viable_tuning_grid = tuning_grid[~tuning_grid.TUNE_IDX.isin(dropped_tis)].reset_index(drop=True)
viable_tuning_grid.to_csv(os.path.join(model_dir,'APM_post_repeat_'+str(REPEAT).zfill(2)+'_deep_tuning_grid.csv'),index=False)
# Clear disk space by deleting folders of dropped out models
dropped_configs = val_pred_file_info_df[~val_pred_file_info_df.TUNE_IDX.isin(viable_tuning_grid.TUNE_IDX)].reset_index(drop=True)
dropped_configs['directory'] = dropped_configs['file'].str.replace("/val_predictions.csv", "", regex=False)
for d in dropped_configs.directory:
rmtree(d)
# NOTE: at this point, train models on subsequent cross-validation partitions and repeat until all CV-partitions have been trained over
### V. Compile and save validation and testing set predictions across partitions
# Search for all prediction files
pred_files = []
for path in Path(model_dir).rglob('*_predictions.csv'):
pred_files.append(str(path.resolve()))
# Characterise the prediction files found
pred_file_info_df = pd.DataFrame({'file':pred_files,
'TUNE_IDX':[re.search('tune_(.*)/', curr_file).group(1) for curr_file in pred_files],
'VERSION':[re.search('_outputs/(.*)/repeat', curr_file).group(1) for curr_file in pred_files],
'repeat':[int(re.search('/repeat(.*)/fold', curr_file).group(1)) for curr_file in pred_files],
'fold':[int(re.search('/fold(.*)/tune_', curr_file).group(1)) for curr_file in pred_files],
'test_or_val':[re.search('/tune_(.*)_predictions', curr_file).group(1) for curr_file in pred_files]
}).sort_values(by=['repeat','fold','TUNE_IDX','VERSION']).reset_index(drop=True)
pred_file_info_df['test_or_val'] = pred_file_info_df['test_or_val'].str.split('/').str[-1]
pred_file_info_df = pred_file_info_df[pred_file_info_df.TUNE_IDX.isin(tuning_grid.TUNE_IDX)].reset_index(drop=True)
pred_file_info_df = pd.merge(pred_file_info_df,tuning_grid[['TUNE_IDX','OUTPUT_ACTIVATION']],how='left',on='TUNE_IDX')
# Separate prediction files by outcome encoding and testing vs. validation
softmax_val_info_df = pred_file_info_df[(pred_file_info_df.OUTPUT_ACTIVATION == 'softmax') & (pred_file_info_df.test_or_val == 'val')].reset_index(drop=True)
softmax_test_info_df = pred_file_info_df[(pred_file_info_df.OUTPUT_ACTIVATION == 'softmax') & (pred_file_info_df.test_or_val == 'test')].reset_index(drop=True)
sigmoid_val_info_df = pred_file_info_df[(pred_file_info_df.OUTPUT_ACTIVATION == 'sigmoid') & (pred_file_info_df.test_or_val == 'val')].reset_index(drop=True)
sigmoid_test_info_df = pred_file_info_df[(pred_file_info_df.OUTPUT_ACTIVATION == 'sigmoid') & (pred_file_info_df.test_or_val == 'test')].reset_index(drop=True)
# Compile predictions into single dataframes
softmax_val_preds = pd.concat([pd.read_csv(curr_file) for curr_file in softmax_val_info_df.file.to_list()],ignore_index=True)
softmax_test_preds = pd.concat([pd.read_csv(curr_file) for curr_file in softmax_test_info_df.file.to_list()],ignore_index = True)
sigmoid_val_preds = pd.concat([pd.read_csv(curr_file) for curr_file in sigmoid_val_info_df.file.to_list()],ignore_index=True)
import unittest
import numpy as np
import os
import shutil
import json
from pathlib import Path
from unittest.mock import patch, MagicMock
from appdirs import user_cache_dir
import geopandas as gpd
import warnings
from ausdex.seifa_vic.data_wrangling import preprocess_victorian_datasets
from ausdex.seifa_vic.seifa_vic import Metric
import pandas as pd
from typer.testing import CliRunner
from ausdex import main
from ausdex.seifa_vic.data_io import (
get_data_links,
load_aurin_config,
load_aurin_data,
download_from_aurin,
get_aurin_wfs,
load_shapefile_data,
load_victorian_suburbs_metadata,
)
from ausdex.files import get_cached_path
import datetime
MOCKED_FILES = [
"seifa_1986_aurin.geojson",
"seifa_1991_aurin.geojson",
"state_suburb_codes_2011..csv.zip",
"seifa_1996_aurin.geojson",
"seifa_2001_aurin.geojson",
"seifa_2006.geojson",
"victoria_councils",
"victoria_suburbs",
"victoria_councils.geojson",
"victoria_suburbs.geojson",
"seifa_2006_cd.xls",
"seifa_suburb_2011.xls",
"seifa_suburb_2016.xls",
"mock_completed.csv",
"aurin_schemas.json",
"seifa_2011_sa1.xls",
"seifa_2016_sa1.xls",
"sa1_gis_2011.geojson",
"sa1_gis_2016.geojson",
]
def fake_data_cached_path(filename):
return (
Path(__file__).parent.resolve() / "testdata" / "ausdex" / "mock_gis" / filename
)
def mock_user_get_cached_path(filename):
print(f"using cached test data for {filename}")
if filename in MOCKED_FILES:
if filename == "state_suburb_codes_2011..csv.zip":
fcp = fake_data_cached_path("SSC_2011_AUST.csv")
print(f"loading test file from {fcp}")
return fcp
else:
fcp = fake_data_cached_path(filename)
print(f"loading test file from {fcp}")
return fcp
else:
cache_dir = Path(user_cache_dir("ausdex"))
cache_dir.mkdir(exist_ok=True, parents=True)
return cache_dir / filename
def mock_load_shapefile_data(filename):
if filename == "seifa_2006_cd_shapefile":
return gpd.read_file(mock_user_get_cached_path("seifa_2006.geojson"))
elif filename == "sa1_gis_2011":
return gpd.read_file(mock_user_get_cached_path("sa1_gis_2011.geojson"))
elif filename == "sa1_gis_2016":
return gpd.read_file(mock_user_get_cached_path("sa1_gis_2016.geojson"))
def mock_preproces_vic_datasets(force_rebuild=False, save_file=False):
print("loading mocked completed dataset")
return pd.read_csv(mock_user_get_cached_path("mock_completed.csv"))
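# The mock_* helpers above form the test fixture layer, assuming the fixtures live in the
# local testdata/ausdex/mock_gis folder: cached-path lookups for MOCKED_FILES are redirected to
# those fixtures, shapefile loads return small GeoJSON extracts, and the expensive
# preprocess_victorian_datasets call is short-circuited to a pre-built mock_completed.csv.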
class TestSeifaVicSetup(unittest.TestCase):
@patch(
"ausdex.seifa_vic.data_io.get_cached_path",
lambda filename: mock_user_get_cached_path(filename),
)
@patch(
"ausdex.seifa_vic.data_wrangling.load_shapefile_data",
lambda filename: mock_load_shapefile_data(filename),
)
def test_preprocess_victorian_datasets(self):
df = preprocess_victorian_datasets(force_rebuild=True, save_file=False)
cols = [
"ieo_score",
"ier_score",
"irsad_score",
"rirsa_score",
"uirsa_score",
"irsd_score",
"year",
"Site_suburb",
]
for col in cols:
assert col in df.columns, f"{col} not in dataset"
self.assertEqual(df.year.max(), 2016)
self.assertEqual(df.year.min(), 1986)
self.assertIn("ASCOT - BALLARAT", df.Site_suburb.unique())
self.assertNotIn("ASCOT - BALLARAT CITY", df.Site_suburb.unique())
def test_group_repeat_names_vic(self):
from ausdex.seifa_vic.data_wrangling import group_repeat_names_vic
ids = [
"VIC2961",
"VIC2967",
"VIC2976",
"VIC2984",
"VIC2990",
"VIC2969",
"VIC2963",
]
fixed_suburbs = [
"BELLFIELD - GRAMPIANS",
"BELLFIELD - BANYULE",
"<NAME> - SWAN HILL",
"HILLSIDE - MELTON",
"RE<NAME> - MITCHELL",
"SPRINGFIELD - <NAME>",
"<NAME> - HEPBURN",
]
for id, suburb in zip(ids, fixed_suburbs):
x = {"loc_pid": id, "suburb_name_combined": "test_failed"}
value = group_repeat_names_vic(x)
self.assertAlmostEqual(value, suburb)
x = {"loc_pid": "wrong", "suburb_name_combined": "test_failed"}
value = group_repeat_names_vic(x)
self.assertEqual(value, "test_failed")
@patch(
"ausdex.seifa_vic.seifa_vic.preprocess_victorian_datasets",
lambda force_rebuild: mock_preproces_vic_datasets(False)
if force_rebuild == True
else None,
)
def test_assemble_data_cli(self):
runner = CliRunner()
result = runner.invoke(main.app, ["seifa-vic-assemble"])
assert result.exit_code == 0
assert "Data loaded" in result.stdout
@patch(
"ausdex.seifa_vic.seifa_vic.preprocess_victorian_datasets",
lambda force_rebuild: mock_preproces_vic_datasets(False),
)
class TestSeifaInterpolation(unittest.TestCase):
def setUp(self) -> None:
from ausdex.seifa_vic.seifa_vic import interpolate_vic_suburb_seifa
self.interpolate = interpolate_vic_suburb_seifa
return super().setUp()
def test_interpolation_null(self):
value = self.interpolate(
[1980, 1987],
"ABBOTSFORD",
"ier_score",
)
# self.assertTrue(value[0] == np.nan)
# print(value)
self.assertTrue(np.isnan(value[0]))
self.assertAlmostEqual(value[1], 955.5048835511469, places=3)
def test_suburb_guess_misspelt(self):
value = self.interpolate(
[1980, 1987],
"ABBOTSFORDXX",
"ier_score",
guess_misspelt=True,
)
self.assertTrue(np.isnan(value[0]))
self.assertAlmostEqual(value[1], 955.5048835511469, places=3)
def test_interpolation_negative(self):
value = self.interpolate(
2200, "ASCOT - BALLARAT", Metric["ier_score"], fill_value="extrapolate"
)
# self.assertTrue(value[0] == np.nan)
# print(value)
self.assertTrue(value == 0)
def test_interpolation_onevalue(self):
from ausdex.seifa_vic import SeifaVic
seifa_vic = SeifaVic(False)
seifa_vic.df = pd.DataFrame(
{
"Site_suburb": ["TEST_SUB1", "test_sub2"],
"ier_score": [36.4, 38.5],
"year": [2000, 2011],
}
)
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
out = seifa_vic.get_seifa_interpolation(
2011, "TEST_SUB1", "ier_score", fill_value="extrapolate"
)
self.assertEqual(36.4, out)
assert len(w) == 1
assert (
"Suburb 'TEST_SUB1' only has one value for ier_score, assuming flat line"
in str(w[-1].message)
)
def test_interpolation_novalue(self):
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
value = self.interpolate(
2200, "Fake", "ier_score", fill_value="extrapolate"
)
# self.assertTrue(value[0] == np.nan)
# print(value)
self.assertTrue(np.isnan(value))
assert len(w) == 1
assert "No suburb named 'FAKE'. Returning NaN." in str(w[-1].message)
def test_interpolation_extrapolate(self):
value = self.interpolate(
pd.Series([1980, 2000]),
"ABBOTSFORD",
"ier_score",
fill_value="extrapolate",
)
self.assertAlmostEqual(value[0], 868.1914314671592, places=3)
self.assertAlmostEqual(value[1], 1055.278795, places=3)
def test_interpolate_boundary_value(self):
value = self.interpolate(
np.array([1980, 1986]),
"ABBOTSFORD",
"ieo_score",
fill_value="boundary_value",
)
self.assertAlmostEqual(value[0], value[1], places=3)
def test_interpolate_multiple_suburbs(self):
value = self.interpolate(
["1-7-1980", "31-10-1986"],
pd.Series(["kew", "ABBOTSFORD"]),
"ieo_score",
fill_value="boundary_value",
)
self.assertAlmostEqual(value[0], 1179.648871, places=3)
self.assertAlmostEqual(994.3434, value[1], places=3)
def test_interpolate_multiple_suburbs_array(self):
value = self.interpolate(
| pd.Series(["1-7-1980", "31-10-1986"]) | pandas.Series |
"""
DeepLabCut2.0 Toolbox (deeplabcut.org)
© <NAME>
https://github.com/AlexEMG/DeepLabCut
Please see AUTHORS for contributors.
https://github.com/AlexEMG/DeepLabCut/blob/master/AUTHORS
Licensed under GNU Lesser General Public License v3.0
"""
import os, pickle, yaml
import pandas as pd
from pathlib import Path
import numpy as np
from deeplabcut.utils import auxiliaryfunctions
def convertcsv2h5(config,userfeedback=True,scorer=None):
"""
Convert (image) annotation files in folder labeled-data from csv to h5.
This function allows the user to manually edit the csv (e.g. to correct the scorer name and then convert it into hdf format).
WARNING: conversion might corrupt the data.
config : string
Full path of the config.yaml file as a string.
userfeedback: bool, optional
If true the user will be asked specifically for each folder in labeled-data if the containing csv shall be converted to hdf format.
scorer: string, optional
If a string is given, then the scorer/annotator in all csv and hdf files that are changed, will be overwritten with this name.
Examples
--------
Convert csv annotation files for reaching-task project into hdf.
>>> deeplabcut.convertcsv2h5('/analysis/project/reaching-task/config.yaml')
--------
Convert csv annotation files for reaching-task project into hdf while changing the scorer/annotator in all annotation files to Albert!
>>> deeplabcut.convertcsv2h5('/analysis/project/reaching-task/config.yaml',scorer='Albert')
--------
"""
cfg = auxiliaryfunctions.read_config(config)
videos = cfg['video_sets'].keys()
video_names = [Path(i).stem for i in videos]
folders = [Path(config).parent / 'labeled-data' /Path(i) for i in video_names]
if scorer==None:
scorer=cfg['scorer']
for folder in folders:
try:
if userfeedback==True:
print("Do you want to convert the csv file in folder:", folder, "?")
askuser = input("yes/no")
else:
askuser="yes"
if askuser=='y' or askuser=='yes' or askuser=='Ja' or askuser=='ha': # multilanguage support :)
fn=os.path.join(str(folder),'CollectedData_' + cfg['scorer'] + '.csv')
data=pd.read_csv(fn)
#nlines,numcolumns=data.shape
orderofbpincsv=list(data.values[0,1:-1:2])
imageindex=list(data.values[2:,0])
#assert(len(orderofbpincsv)==len(cfg['bodyparts']))
print(orderofbpincsv)
print(cfg['bodyparts'])
#TODO: test len of images vs. len of imagenames for another sanity check
index = pd.MultiIndex.from_product([[scorer], orderofbpincsv, ['x', 'y']],names=['scorer', 'bodyparts', 'coords'])
frame = pd.DataFrame(np.array(data.values[2:,1:],dtype=float), columns = index, index = imageindex)
frame.to_hdf(os.path.join(str(folder),'CollectedData_'+ cfg['scorer']+".h5"), key='df_with_missing', mode='w')
frame.to_csv(fn)
except FileNotFoundError:
print("Attention:", folder, "does not appear to have labeled data!")
def analyze_videos_converth5_to_csv(videopath,videotype='.avi'):
"""
By default the output poses (when running analyze_videos) are stored as MultiIndex Pandas Array, which contains the name of the network, body part name, (x, y) label position \n
in pixels, and the likelihood for each frame per body part. These arrays are stored in an efficient Hierarchical Data Format (HDF) \n
in the same directory, where the video is stored. If the flag save_as_csv is set to True, the data is also exported as comma-separated value file. However,
if the flag was *not* set, then this function allows the conversion of all h5 files to csv files (without having to analyze the videos again)!
This functions converts hdf (h5) files to the comma-separated values format (.csv), which in turn can be imported in many programs, such as MATLAB, R, Prism, etc.
Parameters
----------
videopath : string
A strings containing the full paths to videos for analysis or a path to the directory where all the videos with same extension are stored.
videotype: string, optional
Checks for the extension of the video in case the input to the video is a directory.\nOnly videos with this extension are analyzed. The default is ``.avi``
Examples
--------
Converts all pose-output files belonging to mp4 videos in the folder '/media/alex/experimentaldata/cheetahvideos' to csv files.
deeplabcut.analyze_videos_converth5_to_csv('/media/alex/experimentaldata/cheetahvideos','.mp4')
"""
start_path=os.getcwd()
os.chdir(videopath)
Videos=[fn for fn in os.listdir(os.curdir) if (videotype in fn) and ('_labeled.mp4' not in fn)] #exclude labeled-videos!
Allh5files=[fn for fn in os.listdir(os.curdir) if (".h5" in fn) and ("resnet" in fn)]
for video in Videos:
vname = Path(video).stem
#Is there a scorer for this?
PutativeOutputFiles=[fn for fn in Allh5files if vname in fn]
for pfn in PutativeOutputFiles:
scorer=pfn.split(vname)[1].split('.h5')[0]
if "DeepCut" in scorer:
DC = pd.read_hdf(pfn, 'df_with_missing')
print("Found output file for scorer:", scorer)
print("Converting to csv...")
DC.to_csv(pfn.split('.h5')[0]+'.csv')
os.chdir(str(start_path))
print("All pose files were converted.")
def pathmagic(string):
parts=string.split('\\')
if len(parts)==1:
return string
elif len(parts)==3: #this is the expected windows case, it will split into labeled-data, video, imgNR.png
return os.path.join(*parts) #unpack arguments from list with splat operator
else:
return string
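# For illustration (hypothetical path): pathmagic('labeled-data\\reachingvideo1\\img005.png')
# splits into three parts and returns os.path.join('labeled-data', 'reachingvideo1', 'img005.png'),
# i.e. the platform's native separator, while paths without backslashes are returned unchanged.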
def convertpaths_to_unixstyle(Data,fn,cfg):
''' auxiliary function that converts paths in annotation files:
labeled-data\\video\\imgXXX.png to labeled-data/video/imgXXX.png '''
Data.to_csv(fn + "windows" + ".csv")
Data.to_hdf(fn + "windows" + '.h5','df_with_missing',format='table', mode='w')
imindex=[pathmagic(s) for s in Data.index]
for j,bpt in enumerate(cfg['bodyparts']):
        index = pd.MultiIndex.from_product([[cfg['scorer']], [bpt], ['x', 'y']],names=['scorer', 'bodyparts', 'coords'])
from typing import List, Optional, Tuple, Type, Union, Dict, Any
from datetime import datetime, timedelta, tzinfo
from pprint import pformat
import os
import warnings
import logging
import inspect
import pytz
import numpy as np
import pandas as pd
from sqlalchemy.engine import Engine
from sqlalchemy.orm import Query
from sqlalchemy.dialects import postgresql
from timetomodel.utils.debug_utils import render_query
from timetomodel.utils.time_utils import (
tz_aware_utc_now,
timedelta_to_pandas_freq_str,
timedelta_fits_into,
)
from timetomodel.exceptions import IncompatibleModelSpecs, NaNData, MissingData
from timetomodel.transforming import Transformation, ReversibleTransformation
"""
Specs for the context of your model and how to treat your model data.
"""
DEFAULT_RATIO_TRAINING_TESTING_DATA = 2 / 3
DEFAULT_REMODELING_FREQUENCY = timedelta(days=1)
np.seterr(all="warn")
warnings.filterwarnings("error", message="invalid value encountered in power")
logger = logging.getLogger(__name__)
class SeriesSpecs(object):
"""Describes a time series (e.g. a pandas Series).
In essence, a column in the regression frame, filled with numbers.
Using this base class, the column will be filled with NaN values.
If you have data to be loaded in automatically, you should be using one of the subclasses, which allow to describe
or pass in an actual data source to be loaded.
When dealing with columns, our code should usually refer to this superclass so it does not need to care
which kind of data source it is dealing with.
"""
# The name in the resulting feature frame, and possibly in the saved model specs (named by outcome var)
name: str
# The name of the data column in the data source. If None, the name will be tried.
column: Optional[str]
# timezone of the data - e.g. useful when de-serializing data (pandas serialises to UTC)
original_tz: tzinfo
# Custom transformation on feature data to be made before forecasting, back-transformed right after.
feature_transformation: Optional[ReversibleTransformation]
# Custom processing on data right after loading, e.g. for cleanup
post_load_processing: Optional[Transformation]
# Custom resampling parameters. All parameters apply to pd.resample, only "aggregation" is the name
# of the aggregation function to be called of the resulting resampler
resampling_config: Dict[str, Any]
interpolation_config: Dict[str, Any]
def __init__(
self,
name: str,
original_tz: Optional[
tzinfo
] = None, # TODO: why should this be possible to be set?
feature_transformation: Optional[ReversibleTransformation] = None,
post_load_processing: Optional[Transformation] = None,
resampling_config: Dict[str, Any] = None,
interpolation_config: Dict[str, Any] = None,
):
self.name = name
self.original_tz = original_tz
self.feature_transformation = feature_transformation
self.post_load_processing = post_load_processing
self.resampling_config = resampling_config
self.interpolation_config = interpolation_config
self.__series_type__ = self.__class__.__name__
def as_dict(self):
return vars(self)
def _load_series(self) -> pd.Series:
"""Subclasses overwrite this function to get the raw data.
This method is responsible to call any post_load_processing at the right place."""
data = pd.Series()
if self.post_load_processing is not None:
return self.post_load_processing.transform_series(data)
return data
def load_series(
self,
expected_frequency: timedelta,
transform_features: bool = False,
check_time_window: Optional[Tuple[datetime, datetime]] = None,
) -> pd.Series:
"""Load the series data, check compatibility of series data with model specs
and perform feature transformation, if needed.
The actual implementation how to load is deferred to _load_series. Overwrite that for new subclasses.
This function resamples data if the frequency is not equal to the expected frequency.
It is possible to customise this resampling (without that, we aggregate means after default resampling).
To customize resampling, pass in a `resampling_config` argument when you initialize a SeriesSpecs,
with an aggregation method name (e.g. "mean") and kw params which are to be passed into
`pandas.Series.resample`. For example:
`resampling_config={"closed": "left", "aggregation": "sum"}`
Similarly, pass in an `interpolation_config` to the class with kw params to pass into
`pandas.Series.interpolate`. For example, to fill gaps of at most 1 consecutive NaN value through
interpolation of the time index:
`interpolation_config={"method": "time", "limit": 1}`
You can check if a time window would be feasible, i.e. if enough data is loaded, and get suggestions.
Be sure to pass datetimes with tzinfo compatible to your data.
"""
data = self._load_series().sort_index()
# check if data has a DateTimeIndex
if not isinstance(data.index, pd.DatetimeIndex):
raise IncompatibleModelSpecs(
"Loaded series has no DatetimeIndex, but %s" % type(data.index).__name__
)
# make sure we have a time zone (default to UTC), save original time zone
if data.index.tzinfo is None:
self.original_tz = pytz.utc
data.index = data.index.tz_localize(self.original_tz)
else:
self.original_tz = data.index.tzinfo
if self.interpolation_config is not None:
data = self.interpolate_data(data)
# Raise error if data is empty or contains nan values
if data.empty:
            raise MissingData(
                "No values found in requested %s data. It's no use to continue I'm afraid."
                % self.name
            )
if data.isnull().values.any():
            raise NaNData(
                "NaN values found in the requested %s data. It's no use to continue I'm afraid."
                % self.name
            )
# check if we have enough data for the expected time window
if check_time_window is not None:
error_msg = ""
if data.index[0] > check_time_window[0]:
error_msg += (
"Data starts too late (at %s), while we need data from %s"
% (data.index[0], check_time_window[0])
)
if data.index[-1] < check_time_window[1]:
error_msg += (
"Data ends too early (at %s), while we need data until %s"
% (data.index[-1], check_time_window[1])
)
if error_msg:
raise MissingData(error_msg)
# check if time series frequency is okay, if not then resample, and check again
if data.index.freqstr != timedelta_to_pandas_freq_str(expected_frequency):
data = self.resample_data(data, expected_frequency)
if data.index.freqstr != timedelta_to_pandas_freq_str(expected_frequency):
raise IncompatibleModelSpecs(
"Loaded data for %s has different frequency (%s) than used in model specs expect (%s)."
% (
self.name,
data.index.freqstr,
timedelta_to_pandas_freq_str(expected_frequency),
)
)
if transform_features and self.feature_transformation is not None:
data = self.feature_transformation.transform_series(data)
return data
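    # Order of operations in load_series above: _load_series -> tz-localise (default UTC) ->
    # optional interpolation -> empty/NaN checks -> optional time-window check -> resampling to
    # the expected frequency if needed -> optional feature transformation.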
def resample_data(self, data, expected_frequency) -> pd.Series:
if self.resampling_config is None:
data = data.resample(
timedelta_to_pandas_freq_str(expected_frequency)
).mean()
else:
data_resampler = data.resample(
timedelta_to_pandas_freq_str(expected_frequency),
**{
k: v
for k, v in self.resampling_config.items()
if k != "aggregation"
}
)
if "aggregation" not in self.resampling_config:
data = data_resampler.mean()
else:
for agg_name, agg_method in inspect.getmembers(
data_resampler, inspect.ismethod
):
if self.resampling_config["aggregation"] == agg_name:
data = agg_method()
break
else:
raise IncompatibleModelSpecs(
"Cannot find resampling aggregation %s on %s"
% (self.resampling_config["aggregation"], data_resampler)
)
return data
def interpolate_data(self, data) -> pd.Series:
try:
data = data.interpolate(**self.interpolation_config)
except ValueError as e:
raise IncompatibleModelSpecs(
"Cannot call interpolate function with arguments %s. %s"
% (self.interpolation_config, e)
)
return data
def __repr__(self):
return "%s: <%s>" % (self.__class__.__name__, self.as_dict())
class ObjectSeriesSpecs(SeriesSpecs):
"""
Spec for a pd.Series object that is being passed in and is stored directly in the specs.
"""
data: pd.Series
def __init__(
self,
data: pd.Series,
name: str,
original_tz: Optional[tzinfo] = None,
feature_transformation: Optional[ReversibleTransformation] = None,
post_load_processing: Optional[Transformation] = None,
resampling_config: Dict[str, Any] = None,
interpolation_config: Dict[str, Any] = None,
):
super().__init__(
name,
original_tz,
feature_transformation,
post_load_processing,
resampling_config,
interpolation_config,
)
if not isinstance(data.index, pd.DatetimeIndex):
raise IncompatibleModelSpecs(
"Please provide a DatetimeIndex. Only found %s."
% type(data.index).__name__
)
self.data = data
def _load_series(self) -> pd.Series:
if self.post_load_processing is not None:
return self.post_load_processing.transform_series(self.data)
return self.data
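# Minimal usage sketch (an assumption for illustration, not part of the library): wrap an
# existing pandas Series and let load_series handle resampling and interpolation, e.g.
#
#   raw = pd.Series([1.0, 2.0, 4.0, 3.0],
#                   index=pd.date_range("2021-01-01", periods=4, freq="15T", tz="UTC"))
#   specs = ObjectSeriesSpecs(data=raw, name="my_sensor",
#                             resampling_config={"aggregation": "mean"},
#                             interpolation_config={"method": "time", "limit": 1})
#   series = specs.load_series(expected_frequency=timedelta(minutes=30))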
class DFFileSeriesSpecs(SeriesSpecs):
"""
Spec for a pandas DataFrame source.
This class holds the filename, from which we unpickle the data frame, then read the column.
"""
file_path: str
time_column: str
value_column: str
def __init__(
self,
file_path: str,
time_column: str,
value_column: str,
name: str,
original_tz: Optional[tzinfo] = None,
feature_transformation: ReversibleTransformation = None,
post_load_processing: Optional[Transformation] = None,
resampling_config: Dict[str, Any] = None,
interpolation_config: Dict[str, Any] = None,
):
super().__init__(
name,
original_tz,
feature_transformation,
post_load_processing,
resampling_config,
interpolation_config,
)
self.file_path = file_path
self.time_column = time_column
self.value_column = value_column
def _load_series(self) -> pd.Series:
df: pd.DataFrame = pd.read_pickle(self.file_path)
if self.post_load_processing is not None:
df = self.post_load_processing.transform_dataframe(df)
df[self.time_column] = pd.to_datetime(df[self.time_column])
df.set_index(self.time_column, drop=True, inplace=True)
return df[self.value_column]
class CSVFileSeriesSpecs(SeriesSpecs):
"""
Spec for a CSV file source.
This class holds the filename, from which we load the data frame, then read the column.
Any special configuration of pd.read_csv can be given in the `read_csv_config` dict.
"""
file_path: str
time_column: str
value_column: str
read_csv_config: Dict[str, Any]
def __init__(
self,
file_path: str,
time_column: str,
value_column: str,
name: str,
read_csv_config: Dict[str, Any] = None,
original_tz: Optional[tzinfo] = None,
feature_transformation: ReversibleTransformation = None,
post_load_processing: Optional[Transformation] = None,
resampling_config: Dict[str, Any] = None,
interpolation_config: Dict[str, Any] = None,
):
super().__init__(
name,
original_tz,
feature_transformation,
post_load_processing,
resampling_config,
interpolation_config,
)
self.file_path = file_path
self.time_column = time_column
self.value_column = value_column
self.read_csv_config = read_csv_config
def _load_series(self) -> pd.Series:
if not os.path.exists(self.file_path):
raise IncompatibleModelSpecs(
"Filepath %s does not seem to exist." % self.file_path
)
if self.read_csv_config is None:
            df: pd.DataFrame = pd.read_csv(self.file_path)
        else:
            df = pd.read_csv(self.file_path, **self.read_csv_config)
        # remaining steps assumed to mirror DFFileSeriesSpecs._load_series above
        if self.post_load_processing is not None:
            df = self.post_load_processing.transform_dataframe(df)
        df[self.time_column] = pd.to_datetime(df[self.time_column])
        df.set_index(self.time_column, drop=True, inplace=True)
        return df[self.value_column]
"""
Preprocess pipeline for Jefferson Lab High Resolution Spectrometer Calibration(R)
Free distribution under MIT license
copyright @ <NAME> (<EMAIL>)
"""
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.preprocessing import PolynomialFeatures
from pandas import DataFrame
import numpy as np
class preprocess(object):
pass
class PolynomialFeatureTransformer(BaseEstimator,TransformerMixin):
"""
"""
def __init__(self,columns=None,max_order=5,verbose=0):
"""
        :param columns: list of feature column names to expand (defaults to all columns in X)
        :param max_order: maximum polynomial order passed to PolynomialFeatures
        :param verbose: verbosity level (0 = silent)
"""
self.columns = columns
self.max_order = max_order
self.verbose = verbose
def fit(self,X,y=None):
"""
        :param X: pandas DataFrame of features (not used beyond API compatibility)
        :param y: ignored, present for sklearn API compatibility
        :return: self
"""
return self
def transform(self,X):
"""
        :param X: pandas DataFrame of features to transform
        :return: DataFrame with polynomial features of the selected columns plus the untouched columns
"""
# TODO need to check the format of the X, and adapt it to the format
if self.columns:
if not all([x in X.columns for x in self.columns]):
                raise TypeError('the features in the dataset do not match the \'columns\' input, please check the \'columns\' argument')
else:
self.columns = X.columns
selected = X[self.columns]
un_selected = X.drop(columns = self.columns,axis = 1)
poly = PolynomialFeatures(self.max_order)
transValue = poly.fit_transform(selected)
transHeader= poly.get_feature_names(selected.columns)
        #format dataset and concat it into one
        transformed = pd.DataFrame(transValue, columns=transHeader)
        return pd.concat([transformed, un_selected], axis=1)
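# Usage sketch (column names are made up for illustration): expand only selected columns up to
# second order and keep the remaining columns untouched, e.g.
#
#   df = pd.DataFrame({"x_fp": [0.1, 0.2], "th_fp": [0.01, 0.03], "run": [1, 2]})
#   transformer = PolynomialFeatureTransformer(columns=["x_fp", "th_fp"], max_order=2)
#   expanded = transformer.fit(df).transform(df)  # polynomial terms of x_fp/th_fp plus 'run'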
#!/usr/bin/env python3
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
class _Swarmplot_And_Errorbar:
def __init__(self):
self._fig = None
self._ax = None
self._result = None
def _plot(self, x, y, data, hue = None, ax = None, swarmplot_kwargs = dict(), errorbar_kwargs = dict()):
        # remove arguments supplied twice (when given in both places, the values in the kwargs dictionary are used)
swarmplot_kwargs, x = _pop_if_exists(swarmplot_kwargs, x, "x")
swarmplot_kwargs, y = _pop_if_exists(swarmplot_kwargs, y, "y")
swarmplot_kwargs, data = _pop_if_exists(swarmplot_kwargs, data, "data")
swarmplot_kwargs, hue = _pop_if_exists(swarmplot_kwargs, hue, "hue")
swarmplot_kwargs, ax = _pop_if_exists(swarmplot_kwargs, ax, "ax")
# raise error
if ("x" in errorbar_kwargs) or ("y" in errorbar_kwargs):
raise TypeError("'x' and 'y' must be given as strings, not in dictionary")
if ("xerr" in errorbar_kwargs) and ("yerr" in errorbar_kwargs):
raise TypeError("'xerr' and 'yerr' cannot be given together")
# check the orientation of swarmplot
swarmplot_kwargs_new = _set_dict_as_default(swarmplot_kwargs, ["dodge", "alpha"], [True, 0.7])
if "orient" in swarmplot_kwargs_new.keys():
orient = swarmplot_kwargs_new["orient"]
else:
orient = "v"
if orient == "v":
axis0, axis1 = x, y
else:
axis0, axis1 = y, x
# set default parameters (if not given)
if orient == "v":
errorbar_kwargs_new = _set_dict_as_default(errorbar_kwargs, ["fmt", "color", "markersize", "capsize"], ["_", "k", 10, 10])
else:
errorbar_kwargs_new = _set_dict_as_default(errorbar_kwargs, ["fmt", "color", "markersize", "capsize"], ["|", "k", 10, 10])
# set some parameters required for swarmplot
if "order" in swarmplot_kwargs_new.keys():
order = swarmplot_kwargs_new.pop("order")
else:
            order = sorted(list(pd.unique(data[axis0])))
import bz2
from collections import Counter
from contextlib import contextmanager
from datetime import datetime
from functools import wraps
import gzip
import operator
import os
import re
import string
from typing import (
Any,
Callable,
ContextManager,
List,
Optional,
Sequence,
Tuple,
Type,
Union,
cast,
)
import warnings
import zipfile
import numpy as np
from pandas._config.localization import ( # noqa:F401
can_set_locale,
get_locales,
set_locale,
)
from pandas._typing import Dtype, FilePathOrBuffer, FrameOrSeries
from pandas.compat import get_lzma_file, import_lzma
from pandas.core.dtypes.common import (
is_datetime64_dtype,
is_datetime64tz_dtype,
is_period_dtype,
is_sequence,
is_timedelta64_dtype,
)
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
IntervalIndex,
MultiIndex,
RangeIndex,
Series,
bdate_range,
)
from pandas._testing.asserters import ( # noqa:F401
assert_almost_equal,
assert_attr_equal,
assert_categorical_equal,
assert_class_equal,
assert_contains_all,
assert_copy,
assert_datetime_array_equal,
assert_dict_equal,
assert_equal,
assert_extension_array_equal,
assert_frame_equal,
assert_index_equal,
assert_interval_array_equal,
assert_is_sorted,
assert_is_valid_plot_return_object,
assert_numpy_array_equal,
assert_period_array_equal,
assert_series_equal,
assert_sp_array_equal,
assert_timedelta_array_equal,
raise_assert_detail,
)
from pandas._testing.contexts import ( # noqa:F401
decompress_file,
ensure_clean,
ensure_clean_dir,
ensure_safe_environment_variables,
set_timezone,
use_numexpr,
with_csv_dialect,
)
from pandas.core.arrays import DatetimeArray, PeriodArray, TimedeltaArray, period_array
from pandas.io.common import urlopen
lzma = import_lzma()
_N = 30
_K = 4
_RAISE_NETWORK_ERROR_DEFAULT = False
UNSIGNED_INT_DTYPES: List[Dtype] = ["uint8", "uint16", "uint32", "uint64"]
UNSIGNED_EA_INT_DTYPES: List[Dtype] = ["UInt8", "UInt16", "UInt32", "UInt64"]
SIGNED_INT_DTYPES: List[Dtype] = [int, "int8", "int16", "int32", "int64"]
SIGNED_EA_INT_DTYPES: List[Dtype] = ["Int8", "Int16", "Int32", "Int64"]
ALL_INT_DTYPES = UNSIGNED_INT_DTYPES + SIGNED_INT_DTYPES
ALL_EA_INT_DTYPES = UNSIGNED_EA_INT_DTYPES + SIGNED_EA_INT_DTYPES
FLOAT_DTYPES: List[Dtype] = [float, "float32", "float64"]
FLOAT_EA_DTYPES: List[Dtype] = ["Float32", "Float64"]
COMPLEX_DTYPES: List[Dtype] = [complex, "complex64", "complex128"]
STRING_DTYPES: List[Dtype] = [str, "str", "U"]
DATETIME64_DTYPES: List[Dtype] = ["datetime64[ns]", "M8[ns]"]
TIMEDELTA64_DTYPES: List[Dtype] = ["timedelta64[ns]", "m8[ns]"]
BOOL_DTYPES = [bool, "bool"]
BYTES_DTYPES = [bytes, "bytes"]
OBJECT_DTYPES = [object, "object"]
ALL_REAL_DTYPES = FLOAT_DTYPES + ALL_INT_DTYPES
ALL_NUMPY_DTYPES = (
ALL_REAL_DTYPES
+ COMPLEX_DTYPES
+ STRING_DTYPES
+ DATETIME64_DTYPES
+ TIMEDELTA64_DTYPES
+ BOOL_DTYPES
+ OBJECT_DTYPES
+ BYTES_DTYPES
)
NULL_OBJECTS = [None, np.nan, pd.NaT, float("nan"), pd.NA]
EMPTY_STRING_PATTERN = re.compile("^$")
# set testing_mode
_testing_mode_warnings = (DeprecationWarning, ResourceWarning)
def set_testing_mode():
# set the testing mode filters
testing_mode = os.environ.get("PANDAS_TESTING_MODE", "None")
if "deprecate" in testing_mode:
# pandas\_testing.py:119: error: Argument 2 to "simplefilter" has
# incompatible type "Tuple[Type[DeprecationWarning],
# Type[ResourceWarning]]"; expected "Type[Warning]"
warnings.simplefilter(
"always", _testing_mode_warnings # type: ignore[arg-type]
)
def reset_testing_mode():
# reset the testing mode filters
testing_mode = os.environ.get("PANDAS_TESTING_MODE", "None")
if "deprecate" in testing_mode:
# pandas\_testing.py:126: error: Argument 2 to "simplefilter" has
# incompatible type "Tuple[Type[DeprecationWarning],
# Type[ResourceWarning]]"; expected "Type[Warning]"
warnings.simplefilter(
"ignore", _testing_mode_warnings # type: ignore[arg-type]
)
set_testing_mode()
def reset_display_options():
"""
Reset the display options for printing and representing objects.
"""
pd.reset_option("^display.", silent=True)
def round_trip_pickle(
obj: Any, path: Optional[FilePathOrBuffer] = None
) -> FrameOrSeries:
"""
Pickle an object and then read it again.
Parameters
----------
obj : any object
The object to pickle and then re-read.
path : str, path object or file-like object, default None
The path where the pickled object is written and then read.
Returns
-------
pandas object
The original object that was pickled and then re-read.
"""
_path = path
if _path is None:
_path = f"__{rands(10)}__.pickle"
with ensure_clean(_path) as temp_path:
pd.to_pickle(obj, temp_path)
return pd.read_pickle(temp_path)
def round_trip_pathlib(writer, reader, path: Optional[str] = None):
"""
Write an object to file specified by a pathlib.Path and read it back
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
pandas object
The original object that was serialized and then re-read.
"""
import pytest
Path = pytest.importorskip("pathlib").Path
if path is None:
path = "___pathlib___"
with ensure_clean(path) as path:
writer(Path(path))
obj = reader(Path(path))
return obj
def round_trip_localpath(writer, reader, path: Optional[str] = None):
"""
Write an object to file specified by a py.path LocalPath and read it back.
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
pandas object
The original object that was serialized and then re-read.
"""
import pytest
LocalPath = pytest.importorskip("py.path").local
if path is None:
path = "___localpath___"
with ensure_clean(path) as path:
writer(LocalPath(path))
obj = reader(LocalPath(path))
return obj
def write_to_compressed(compression, path, data, dest="test"):
"""
Write data to a compressed file.
Parameters
----------
compression : {'gzip', 'bz2', 'zip', 'xz'}
The compression type to use.
path : str
The file path to write the data.
data : str
The data to write.
dest : str, default "test"
The destination file (for ZIP only)
Raises
------
ValueError : An invalid compression value was passed in.
"""
args: Tuple[Any, ...] = (data,)
mode = "wb"
method = "write"
compress_method: Callable
if compression == "zip":
compress_method = zipfile.ZipFile
mode = "w"
args = (dest, data)
method = "writestr"
elif compression == "gzip":
compress_method = gzip.GzipFile
elif compression == "bz2":
compress_method = bz2.BZ2File
elif compression == "xz":
compress_method = get_lzma_file(lzma)
else:
raise ValueError(f"Unrecognized compression type: {compression}")
with compress_method(path, mode=mode) as f:
getattr(f, method)(*args)
def randbool(size=(), p: float = 0.5):
return np.random.rand(*size) <= p
RANDS_CHARS = np.array(list(string.ascii_letters + string.digits), dtype=(np.str_, 1))
RANDU_CHARS = np.array(
list("".join(map(chr, range(1488, 1488 + 26))) + string.digits),
dtype=(np.unicode_, 1),
)
def rands_array(nchars, size, dtype="O"):
"""
Generate an array of byte strings.
"""
retval = (
np.random.choice(RANDS_CHARS, size=nchars * np.prod(size))
.view((np.str_, nchars))
.reshape(size)
)
return retval.astype(dtype)
def randu_array(nchars, size, dtype="O"):
"""
Generate an array of unicode strings.
"""
retval = (
np.random.choice(RANDU_CHARS, size=nchars * np.prod(size))
.view((np.unicode_, nchars))
.reshape(size)
)
return retval.astype(dtype)
def rands(nchars):
"""
Generate one random byte string.
See `rands_array` if you want to create an array of random strings.
"""
return "".join(np.random.choice(RANDS_CHARS, nchars))
def close(fignum=None):
from matplotlib.pyplot import close as _close, get_fignums
if fignum is None:
for fignum in get_fignums():
_close(fignum)
else:
_close(fignum)
# -----------------------------------------------------------------------------
# Comparators
def equalContents(arr1, arr2) -> bool:
"""
Checks if the set of unique elements of arr1 and arr2 are equivalent.
"""
return frozenset(arr1) == frozenset(arr2)
def box_expected(expected, box_cls, transpose=True):
"""
Helper function to wrap the expected output of a test in a given box_class.
Parameters
----------
expected : np.ndarray, Index, Series
box_cls : {Index, Series, DataFrame}
Returns
-------
subclass of box_cls
"""
if box_cls is pd.array:
expected = pd.array(expected)
elif box_cls is pd.Index:
expected = pd.Index(expected)
elif box_cls is pd.Series:
expected = pd.Series(expected)
elif box_cls is pd.DataFrame:
expected = pd.Series(expected).to_frame()
if transpose:
# for vector operations, we need a DataFrame to be a single-row,
# not a single-column, in order to operate against non-DataFrame
# vectors of the same length.
expected = expected.T
elif box_cls is PeriodArray:
# the PeriodArray constructor is not as flexible as period_array
expected = period_array(expected)
elif box_cls is DatetimeArray:
expected = DatetimeArray(expected)
elif box_cls is TimedeltaArray:
expected = TimedeltaArray(expected)
elif box_cls is np.ndarray:
expected = np.array(expected)
elif box_cls is to_array:
expected = to_array(expected)
else:
raise NotImplementedError(box_cls)
return expected
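# For illustration: box_expected([1, 2, 3], pd.Series) returns pd.Series([1, 2, 3]), while
# box_expected([1, 2, 3], pd.DataFrame) wraps the values in a frame and (with transpose=True)
# transposes it into a single row so it can broadcast against same-length vectors in tests.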
def to_array(obj):
# temporary implementation until we get pd.array in place
dtype = getattr(obj, "dtype", None)
if is_period_dtype(dtype):
return period_array(obj)
elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):
return DatetimeArray._from_sequence(obj)
    elif is_timedelta64_dtype(dtype):
        return TimedeltaArray._from_sequence(obj)
    else:
        return np.asarray(obj)
import datetime
import pandas as pd
from pandas.util.testing import assert_frame_equal
from announcements import *
from announcement import *
from database_mysql import *
from nose import with_setup
import os
from unittest.mock import patch
from urllib import parse, request
directory = os.path.dirname(os.path.realpath(__file__))
def setup():
"""set up test fixtures"""
def teardown():
"""tear down test fixtures"""
def test_announcements_constructor():
"""Testing the announcement constructor."""
sql = "Select * from company_announcements where company_id = 1045 " \
"order by published_at DESC"
database = DatabaseMySQL()
announcements = Announcements(database.get_query_df(sql))
assert isinstance(announcements, Announcements)
def test_get_announcements():
"""Testing the announcement constructor."""
sql = "Select * from company_announcements where company_id = 1045 " \
"order by published_at DESC"
database = DatabaseMySQL()
announcements = Announcements(database.get_query_df(sql))
df = announcements.get_announcements()
assert isinstance(df, pd.DataFrame)
def test_generate_test_data():
"""Testing the function output structure."""
filename = os.path.join(directory, "../resources/testing/pre_sens_flag.csv")
df = pd.read_csv(filename)
announcements = Announcements(df)
assert isinstance(announcements, Announcements)
def test_add_pre_sens_flag():
"""Testing the addition of pre price sensitive flag."""
filename = os.path.join(directory, "../resources/testing/pre_sens_flag.csv")
df = pd.read_csv(filename)  # api: pandas.read_csv
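# The masked call above targets pandas.read_csv. A minimal, self-contained sketch
# that reads from an in-memory buffer instead of the fixture file; the column
# names below are hypothetical.
def _demo_read_csv():
    import io
    import pandas as pd
    csv_text = "announcement_id,pre_sens_flag\n1,0\n2,1\n"
    frame = pd.read_csv(io.StringIO(csv_text))
    assert list(frame.columns) == ["announcement_id", "pre_sens_flag"]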
import anonypy
import pandas as pd
data = [
[6, "1", "test1", "x", 20],
[6, "1", "test1", "x", 30],
[8, "2", "test2", "x", 50],
[8, "2", "test3", "w", 45],
[8, "1", "test2", "y", 35],
[4, "2", "test3", "y", 20],
[4, "1", "test3", "y", 20],
[2, "1", "test3", "z", 22],
[2, "2", "test3", "y", 32],
]
columns = ["col1", "col2", "col3", "col4", "col5"]
categorical = set(("col2", "col3", "col4"))
def test_k_anonymity():
df = pd.DataFrame(data=data, columns=columns)
print(df)
for name in categorical:
df[name] = df[name].astype("category")
feature_columns = ["col1", "col2", "col3"]
sensitive_column = "col4"
p = anonypy.Preserver(df, feature_columns, sensitive_column)
rows = p.anonymize_k_anonymity(k=2)
dfn = pd.DataFrame(rows)
print(dfn)
def test_count_k_anonymity():
df = pd.DataFrame(data=data, columns=columns)  # api: pandas.DataFrame
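# A small sketch of the pattern used in these tests: build a DataFrame from a
# list of rows, then mark chosen columns as pandas "category" dtype. The data
# here is made up for illustration.
def _demo_categorical_frame():
    import pandas as pd
    frame = pd.DataFrame(data=[[1, "a"], [2, "b"]], columns=["col1", "col2"])
    frame["col2"] = frame["col2"].astype("category")
    assert str(frame["col2"].dtype) == "category"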
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/03-page-population.ipynb (unless otherwise specified).
__all__ = ['get_dp_field_to_url_format_str', 'get_dp_field_to_title', 'format_id_values', 'single_site_data_to_ids_df',
'single_site_data_to_ids_md_str', 'get_datapackage_url_to_alt_indexes', 'get_datapackage_url_to_attributes',
'filter_dict', 'set_multi_index_names', 'create_multi_index_attrs_df', 'get_attrs_df_index_cols',
'create_single_index_attrs_df', 'idx_to_attr_name', 'get_field_class', 'format_attribute_value_types',
'construct_attr_to_field_schema', 'extract_datapackage_url_to_ids', 'extract_combined_attrs_df',
'extract_datapackage_url_to_dict_id_type', 'get_datapackage_url_to_attrs_md_str', 'construct_dataset_md_str',
'single_site_data_to_datasets_md_str', 'clean_dp_name', 'construct_downloads_md_str',
'extract_name_from_single_site_data', 'single_site_data_to_md_str', 'populate_and_save_template',
'clean_object_ids_to_names', 'get_object_ids_to_names', 'construct_object_docs']
# Cell
import json
import numpy as np
import pandas as pd
from frictionless import Package
from powerdict import extraction
import os
from tqdm import tqdm
from warnings import warn
from jinja2 import Template
# Cell
def get_dp_field_to_url_format_str(datapackage_json_fp):
package = Package(datapackage_json_fp, profile='tabular-data-package')
ids_resource = package.get_resource('ids')
id_field_to_url_format_str = {
field['name']: field['url_format']
for field
in ids_resource['schema']['fields']
if 'url_format' in field.keys()
}
return id_field_to_url_format_str
# Cell
def get_dp_field_to_title(datapackage_json_fp):
package = Package(datapackage_json_fp, profile='tabular-data-package')
ids_resource = package.get_resource('ids')
id_field_to_title = {
field['name']: field['title']
for field
in ids_resource['schema']['fields']
}
return id_field_to_title
# Cell
def format_id_values(id_values, id_type, id_field_to_url_format_str):
if id_type in id_field_to_url_format_str.keys():
url_format_str = id_field_to_url_format_str[id_type]
id_values_strs = [f'[{id_value}]({url_format_str.format(value=id_value)})' for id_value in id_values]
else:
id_values_strs = [str(id_value) for id_value in id_values]
return id_values_strs
# Cell
def single_site_data_to_ids_df(single_site_data, root_id, datapackage_json_fp, root_id_type='osuked_id'):
id_field_to_url_format_str = get_dp_field_to_url_format_str(datapackage_json_fp)
id_field_to_title = get_dp_field_to_title(datapackage_json_fp)
df_site_ids = pd.DataFrame([{'Relationship': 'Root', 'ID Type': id_field_to_title[root_id_type], 'ID(s)': root_id}])  # api: pandas.DataFrame
# pylint: disable=E1101
from pandas.util.py3compat import StringIO, BytesIO, PY3
from datetime import datetime
from os.path import split as psplit
import csv
import os
import sys
import re
import unittest
import nose
from numpy import nan
import numpy as np
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
ExcelFile, TextFileReader, TextParser)
from pandas.util.testing import (assert_almost_equal,
assert_series_equal,
network,
ensure_clean)
import pandas.util.testing as tm
import pandas as pd
import pandas.lib as lib
from pandas.util import py3compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
from pandas._parser import OverflowError
from pandas.io.parsers import (ExcelFile, ExcelWriter, read_csv)
def _skip_if_no_xlrd():
try:
import xlrd
ver = tuple(map(int, xlrd.__VERSION__.split(".")[:2]))
if ver < (0, 9):
raise nose.SkipTest('xlrd not installed, skipping')
except ImportError:
raise nose.SkipTest('xlrd not installed, skipping')
def _skip_if_no_xlwt():
try:
import xlwt
except ImportError:
raise nose.SkipTest('xlwt not installed, skipping')
def _skip_if_no_openpyxl():
try:
import openpyxl
except ImportError:
raise nose.SkipTest('openpyxl not installed, skipping')
def _skip_if_no_excelsuite():
_skip_if_no_xlrd()
_skip_if_no_xlwt()
_skip_if_no_openpyxl()
_seriesd = tm.getSeriesData()
_tsd = tm.getTimeSeriesData()
_frame = DataFrame(_seriesd)[:10]
_frame2 = DataFrame(_seriesd, columns=['D', 'C', 'B', 'A'])  # api: pandas.DataFrame
import re
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
from werkzeug.utils import secure_filename
import numpy as np
import pandas as pd
import pickle
import os
cwd = os.getcwd()
pretrained_model = pickle.load(open(cwd + '\\ml_model\\model.pkl','rb'))
tfidf = pickle.load(open(cwd + '\\ml_model\\ngrams_vectorizer.pkl','rb'))
flairs = ['AMA', 'AskIndia', 'Business/Finance', 'Coronavirus', 'Food', 'Non-Political', 'Photography', 'Policy/Economy', 'Politics', 'Scheduled', 'Science/Technology', 'Sports']
# Importing Library Reddit Library
import praw
# Initializing Reddit API
reddit = praw.Reddit(client_id='client_id',
client_secret='client_secret',
user_agent='user_agent',
username='username',
password='password')
REPLACE_BY_SPACE_RE = re.compile('[/(){}\[\]\|@,;]')
BAD_SYMBOLS_RE = re.compile('[^0-9a-z #+_]')
STOPWORDS = set(stopwords.words('english'))
def clean_text(text):
# NaN missing values are floats, so `type(text) == np.nan` was always False; test for NaN explicitly.
if isinstance(text, float) and np.isnan(text):
return np.nan
text = str(text)
text = text.lower()
text = REPLACE_BY_SPACE_RE.sub(' ', text)
text = BAD_SYMBOLS_RE.sub('', text)
text = ' '.join(word for word in text.split() if word not in STOPWORDS)
return text
def data_prep(res_url):
posts = reddit.submission(url = str(res_url))
res_data = {}
res_data["title"] = str(posts.title)
res_data["title_u"] = str(posts.title)
res_data["selftext"] = str(posts.selftext)
count = 0
posts.comments.replace_more(limit=50)
combined_comments = " "
for comment in posts.comments:
combined_comments += " " + comment.body
res_data["combined_comments"] = str(combined_comments)
res_data['title'] = clean_text(str(res_data['title']))
res_data['selftext'] = clean_text(str(res_data['selftext']))
res_data['combined_comments'] = clean_text(str(res_data['combined_comments']))
return res_data
def pred(pred_url):
features = data_prep(pred_url)
final_features = str(features['title']) + str(features['selftext']) + str(features['combined_comments'])
data = pd.DataFrame({"content":[final_features]})  # api: pandas.DataFrame
import pytest
import pandas.util._test_decorators as td
import pandas as pd
import pandas._testing as tm
arrays = [pd.array([1, 2, 3, None], dtype=dtype) for dtype in tm.ALL_EA_INT_DTYPES]
arrays += [pd.array([0.1, 0.2, 0.3, None], dtype=dtype) for dtype in tm.FLOAT_EA_DTYPES]
arrays += [pd.array([True, False, True, None], dtype="boolean")]
@pytest.fixture(params=arrays, ids=[a.dtype.name for a in arrays])
def data(request):
return request.param
@td.skip_if_no("pyarrow", min_version="0.15.0")
def test_arrow_array(data):
# protocol added in 0.15.0
import pyarrow as pa
arr = pa.array(data)
expected = pa.array(
data.to_numpy(object, na_value=None),
type=pa.from_numpy_dtype(data.dtype.numpy_dtype),
)
assert arr.equals(expected)
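# The @td.skip_if_no decorators in this module gate tests on optional pyarrow
# versions. A hedged sketch of the same idea expressed with plain pytest; the
# minimum version used here is illustrative only.
def _demo_optional_dependency_gate():
    import pytest
    pa = pytest.importorskip("pyarrow", minversion="0.15.0")  # skips when absent/too old
    assert pa.array([1, 2]).to_pylist() == [1, 2]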
@td.skip_if_no("pyarrow", min_version="0.16.0")
def test_arrow_roundtrip(data):
# roundtrip possible from arrow 0.16.0
import pyarrow as pa
df = pd.DataFrame({"a": data})
table = pa.table(df)
assert table.field("a").type == str(data.dtype.numpy_dtype)
result = table.to_pandas()
assert result["a"].dtype == data.dtype
tm.assert_frame_equal(result, df)
@td.skip_if_no("pyarrow", min_version="0.15.1.dev")  # api: pandas.util._test_decorators.skip_if_no
#%%
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from src.visualization_description.descriptive_tool import DescribeData
from src.models.data_modules import CheXpertDataModule
fig_path_report = '../Thesis-report/00_figures/cheXpert/'
save_figs = False
disease = 'Cardiomegaly'
def get_descriptive_cheXpert_data(data_set):
# Loading CheXpert
dm = CheXpertDataModule(**{
"target_disease": "Cardiomegaly",
'multi_label': False,
"uncertainty_approach": "U-Zeros",
'tiny_sample_data': False,
'extended_image_augmentation':False})
if data_set == "train":
meta_dat = dm.train_data.dataset_df.assign(
y = dm.train_data.y.squeeze()
)
if data_set == "val":
meta_dat = dm.val_data.dataset_df.assign(
y = dm.val_data.y.squeeze()
)
if data_set == "test":
meta_dat = dm.test_data.dataset_df.assign(
y = dm.test_data.y.squeeze()
)
if data_set == "all":
# Uncertainty approach
if dm.uncertainty_approach == 'U-Ones':
target_map = {
np.nan: 0, # unmentioned
0.0: 0, # negative
-1.0: 1, # uncertain
1.0: 1 # positive
}
elif dm.uncertainty_approach == 'U-Zeros':
target_map = {
np.nan: 0, # unmentioned
0.0: 0, # negative
-1.0: 0, # uncertain
1.0: 1 # positive
}
meta_dat = dm.dataset_df.assign(
y = lambda x: x[dm.target_disease].map(target_map)
)
# Adding Race from processed demo data
processed_demo = pd.read_csv("data/CheXpert/processed/cheXpert_processed_demo_data.csv")  # api: pandas.read_csv
#!/usr/bin/env jupyter-console
from sklearn.experimental import enable_iterative_imputer # noqa
from sklearn.experimental import enable_hist_gradient_boosting # noqa
from sklearn.preprocessing import OneHotEncoder, OrdinalEncoder, StandardScaler
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import KFold, cross_val_predict, cross_validate
from sklearn.linear_model import LassoCV, LinearRegression, RidgeCV
from sklearn.impute import IterativeImputer, SimpleImputer
from sklearn.ensemble import HistGradientBoostingRegressor, RandomForestRegressor
from sklearn.compose import make_column_transformer
import statsmodels.api as sm
import matplotlib.pyplot as plt
from matplotlib import gridspec
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np # linear algebra
import time
import logging
import sys
import os
KAGGLE_PROJ_PATH = os.path.join(os.getcwd())
# append path so we can import functions from local modules
sys.path.append(KAGGLE_PROJ_PATH)
from util import get_logger # noqa
from plot import generate_summary_plots, plot_regression_results, plot_regression_results2 # noqa
SEED = 12345
# Setup logging so we can avoid using print statements all over the place
global log
log = get_logger()
def apply_cleansing(df):
"""Apply data cleansing transformations and corrections to a dataframe. This is mostly centralized so data cleansing only happens in one place.
:param df: pd.DataFrame. A pandas Dataframe representing the ames housing price dataset
:returns: pd.DataFrame. A cleaned dataframe.
"""
# Exterior2nd has mismatching values compared to Exterior1st
df.Exterior2nd = (
df.Exterior2nd.replace("Wd Shng", "WdShing")
.replace("CmentBd", "CemntBd")
.replace("Brk Cmn", "BrkComm")
)
return df
def apply_imputation(df, cols_to_exclude=[]):
"""For columns with missing data, apply imputation techniques to fill in the missing data. Note that this function is not needed when using the sklearn Pipeline API as imputation happens within the pipeline.
:param df: pd.DataFrame. Data that contains all the columns needed to impute
:param cols_to_exclude: list(str). A list of columns that should not be factored into imputation. For example, say that the predictor variable of interest is a part of "df". The imputed values should not take into consideration the predictor variable as it would cause data leakage and pollute the final results.
:return df: pd.DataFrame. Dataframe with imputed values.
"""
# store a copy of pre-imputed data just in case additional analysis is needed.
data_pre_imp = (
df.select_dtypes(include=np.number).copy().drop(
cols_to_exclude, axis=1)
)
# impute mean using sklearn
# TODO: check to make sure the imputations make sense afterwards
imp_mean = IterativeImputer(random_state=SEED)
data_trans = imp_mean.fit_transform(data_pre_imp)
imputed_df = pd.DataFrame(data_trans, columns=data_pre_imp.columns)
for col in imputed_df:
df[col] = imputed_df[col]
return df
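# A toy sketch of the imputation performed by apply_imputation above, on a tiny
# hypothetical frame with one missing value; random_state=0 is used here only to
# make the sketch reproducible.
def _demo_iterative_imputer():
    import numpy as np
    import pandas as pd
    from sklearn.experimental import enable_iterative_imputer  # noqa: F401
    from sklearn.impute import IterativeImputer
    toy = pd.DataFrame({"a": [1.0, 2.0, np.nan], "b": [10.0, 20.0, 30.0]})
    filled = IterativeImputer(random_state=0).fit_transform(toy)
    assert not np.isnan(filled).any()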
def get_area_cols(df):
"""Return all of the columns that represent area measurements in the Ames Housing Dataset.
:param df: pd.DataFrame. Ames Housing Dataset
:returns: list(str). List of column names.
"""
return list(filter(lambda _: any(x in _ for x in ["SF", "Area"]), df.columns)) + [
"LotFrontage",
"SalePrice",
]
def filter_suffix(ls, df, suffix="log"):
"""Filter a list of column names based on a provided suffix.
:param ls: list(str). List of column names.
:param df: pd.DataFrame. Dataframe containing columns that are being compared against.
:param suffix: str. Suffix present in the column name that should be filtered for.
:returns:
"""
return list(
map(lambda _: _ + "_" + suffix if (_ + suffix)
not in df.columns else _, ls)
)
def add_indicators(data):
"""Add indicator variables (using OneHotEncoder) to the Ames Housing Price Dataset. Note that this is done during a step when using the Pipeline API so this is only if building and fitting models without using the Pipeline API.
:param data: pd.DataFrame. A dataframe representing the Ames Housing Dataset.
:returns: pd.DataFrame. The source dataframe with indicator variables joined to it.
"""
exterior_indicators = (
# create indicator variables for both "Exterior" columns
pd.get_dummies(data.Exterior1st, drop_first=True).add(
pd.get_dummies(data.Exterior2nd, drop_first=True), fill_value=0
)
)
exterior_indicators = (
# rename them to be more descriptive
exterior_indicators.rename(
columns={
c: "ExteriorHas" + c.replace(" ", "")
for c in exterior_indicators.columns
}
)
# in cases where both the Exterior1st and Exterior2nd was the same, we don't care so set to 1
.replace(2, 1)
)
condition_indicators = pd.get_dummies(data.Condition1, drop_first=True)  # api: pandas.get_dummies
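# pandas.get_dummies with drop_first=True, as used above, keeps k-1 indicator
# columns for a k-level categorical. The levels below are hypothetical.
def _demo_get_dummies():
    import pandas as pd
    cond = pd.Series(["Norm", "Feedr", "Norm"], name="Condition1")
    dummies = pd.get_dummies(cond, drop_first=True)
    assert list(dummies.columns) == ["Norm"]  # "Feedr" became the dropped baseline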
import pandas as pd
import numpy as np
import nltk
from nltk.corpus import stopwords
from nltk.stem import SnowballStemmer
import re
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
import datetime, time, json
from string import punctuation
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Embedding, Dense, Dropout, Reshape, Merge, BatchNormalization, TimeDistributed, Lambda, Activation, LSTM, Flatten, Convolution1D, GRU, MaxPooling1D
from keras.regularizers import l2
from keras.callbacks import Callback, ModelCheckpoint, EarlyStopping
from keras import initializers
from keras import backend as K
from keras.optimizers import SGD
from collections import defaultdict
# In[6]:
train = pd.read_csv("../data/train.csv")  # api: pandas.read_csv
import argparse
import boto3
import botocore
import csv
import json
import lattice
import os
import pandas as pd
import re
import requests
import subprocess
import sys
import qcmetrics_mapper
from urllib.parse import urljoin
from bs4 import BeautifulSoup
EPILOG = '''
Extract summary info and QC metrics from a cellranger pipeline run.
Examples:
python %(prog)s -m production -a atac -d submissions-czi009kid/muto_humphreys_2020/Control_5/outs
python %(prog)s -m local -a rna -d submissions-czi012eye/chen_2020/19D013_foveaR_outs
For more details:
python %(prog)s --help
'''
def getArgs():
parser = argparse.ArgumentParser(
description=__doc__, epilog=EPILOG,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument('--dir', '-d',
help="s3 path to the cellranger outs directory or dragen html, or local path to a file that lists those")
parser.add_argument('--assay', '-a',
help="specify atac or rna")
parser.add_argument('--mode', '-m',
help='The machine to pull schema from.')
parser.add_argument('--pipeline', '-p',
help='The pipeline that generated the metrics.')
args = parser.parse_args()
return args
def schemify(value, prop_type):
if (prop_type == 'integer') and (str(value).endswith('.0') == True):
# str.strip('.0') removes any leading/trailing '.' or '0' characters (e.g. '100.0' -> '1'),
# so slice off only the trailing '.0' instead.
return str(value)[:-2]
elif value == None:
return ''
else:
return str(value)
args = getArgs()
if not args.mode:
sys.exit('ERROR: --mode is required')
if not args.pipeline:
sys.exit('ERROR: --pipeline is required')
s3client = boto3.client("s3")
connection = lattice.Connection(args.mode)
server = connection.server
dir_list = args.dir
if os.path.isfile(dir_list):
directories = [line.rstrip('\n') for line in open(dir_list)]
else:
directories = dir_list.split(',')
in_schema = {}
out_schema = {}
genotypemetrics = []
if args.assay == 'rna':
obj_name = 'rna_metrics'
files_to_check = [
'metrics_summary.csv',
'web_summary.html'
]
elif args.assay == 'atac':
obj_name = 'atac_metrics'
files_to_check = [
'summary.json',
'web_summary.html'
]
else:
sys.exit('must specify rna or atac for --assay')
schema_url = urljoin(server, 'profiles/{}/?format=json'.format(obj_name))
full_schema = requests.get(schema_url).json()
schema_props = list(full_schema['properties'].keys())
schema_version = 'schema_version=' + (full_schema['properties']['schema_version']['default'])
if args.pipeline.lower() in ['cr','cellranger']:
value_mapping = qcmetrics_mapper.cellranger['value_mapping']
schema_mapping = qcmetrics_mapper.cellranger['schema_mapping']
should_match = qcmetrics_mapper.cellranger['should_match']
for direct in directories:
direct = direct.replace('s3://', '')
full_path = direct.rstrip('/')
bucket_name = full_path.split('/')[0]
outs_dir_path = full_path.replace(bucket_name + '/', '')
report_json = {'quality_metric_of': '<linkTo RawMatrixFile - filtered matrix .h5>'}
summary_file = 'web_summary.html'
try:
s3client.download_file(bucket_name, outs_dir_path + '/' + summary_file, summary_file)
except botocore.exceptions.ClientError:
print('Failed to find {} on s3'.format(summary_file))
else:
print(summary_file + ' downloaded')
with open(summary_file) as html_doc:
match_flag = False
soup = BeautifulSoup(html_doc, 'html.parser')
for x in soup.find_all('script'):
match = re.search("const data = ", x.string)
if match:
match_flag = True
end = match.end()
data = json.loads(x.string[end:])
if data.get('pipeline_info_table'):
pipeline_info_table = data.get('pipeline_info_table')
else:
pipeline_info_table = data['summary']['summary_tab']['pipeline_info_table']
info_list = pipeline_info_table['rows']
for pair in info_list:
report_json[pair[0]] = value_mapping.get(pair[1], pair[1])
if match_flag == False:
for x in soup.find_all('table', id='sample_table'):
for row in x.find_all('tr'):
col_count = 1
columns = row.find_all('td')
for column in columns:
if col_count == 1:
field = column.get_text().strip()
else:
value = column.get_text().strip()
if not value:
value = ''
col_count += 1
report_json[field] = value_mapping.get(value, value)
os.remove(summary_file)
print(summary_file + ' removed')
if args.assay == 'atac':
metrics_file = 'summary.json'
try:
s3client.download_file(bucket_name, outs_dir_path + '/' + metrics_file, metrics_file)
except botocore.exceptions.ClientError:
print('Failed to find {} on s3'.format(metrics_file))
else:
with open(metrics_file) as summary_json:
post_json = json.load(summary_json)
my_props = list(post_json.keys())
for prop in my_props:
if prop in schema_mapping.keys():
post_json[schema_mapping[prop]] = post_json[prop]
del post_json[prop]
else:
metrics_file = 'metrics_summary.csv'
try:
s3client.download_file(bucket_name, outs_dir_path + '/' + metrics_file, metrics_file)
except botocore.exceptions.ClientError:
print('Failed to find {} on s3'.format(metrics_file))
else:
with open(metrics_file, newline='') as csvfile:
spamreader = csv.reader(csvfile)
rows = list(spamreader)
headers = [header.lower().replace(' ','_') for header in rows[0]]
new_headers = [schema_mapping.get(header, header) for header in headers]
values = rows[1]
new_values = [value.strip('%') for value in values]
post_json = dict(zip(new_headers, new_values))
os.remove(metrics_file)
print(metrics_file + ' removed')
report_json.update(post_json)
final_values = {}
extra_values = {}
for prop, value in report_json.items():
if prop in schema_props:
final_values[prop] = schemify(value, full_schema['properties'][prop]['type'])
else:
extra_values[prop] = value
# CHANGE TO for k,v in should_match.items():
if prop in should_match.keys():
if report_json[prop] != report_json[should_match[prop]]:
print('WARNING: {} does not match {}'.format(should_match[prop], prop))
else:
print('all good: {} does match {}'.format(should_match[prop], prop))
in_schema[direct] = final_values
out_schema[direct] = extra_values
elif args.pipeline.lower() == 'dragen':
schema_mapping = qcmetrics_mapper.dragen['schema_mapping']
value_mapping = qcmetrics_mapper.dragen['value_mapping']
for file in directories:
file = file.replace('s3://', '')
full_path = file.rstrip('/')
bucket_name = full_path.split('/')[0]
outs_dir_path = '/'.join(full_path.split('/')[1:-1])
report_json = {'quality_metric_of': '<linkTo RawMatrixFile - filtered matrix .h5>'}
summary_file = full_path.split('/')[-1]
try:
s3client.download_file(bucket_name, outs_dir_path + '/' + summary_file, summary_file)
except botocore.exceptions.ClientError:
print('Failed to find {} on s3'.format(summary_file))
else:
print(summary_file + ' downloaded')
with open(summary_file) as html_doc:
soup = BeautifulSoup(html_doc, 'html.parser')
sc_metrics = soup.find('main', id='scrnaseq-metrics-page')
x = sc_metrics.find('table', {"class":"table table-striped table-hover"})
for row in x.find_all('tr'):
col_count = 1
columns = row.find_all('td')
for column in columns:
if col_count == 1:
field = column.get_text().strip()
field = schema_mapping.get(field, field)
else:
value = column.get_text().strip()
if field in value_mapping:
factor = value_mapping[field]['factor']
action = value_mapping[field]['action']
if action == 'multiply':
value = str(float(value) * factor)
report_json[field] = value
col_count += 1
map_metrics = soup.find('main', id='mapping-metrics-page')
x = map_metrics.find('table', {"class":"table table-striped"})
for row in x.find_all('tr'):
col_count = 1
columns = row.find_all('td')
for column in columns:
if col_count == 1:
field = column.get_text().strip()
field = schema_mapping.get(field, field)
elif col_count == 2:
value = column.get_text().strip()
report_json[field] = value
else:
per = column.get_text().strip()
if per:
field = field + ' %'
field = schema_mapping.get(field, field)
report_json[field] = per
col_count += 1
genotype_metrics = soup.find( 'main', id='genotype-demultiplexing-page')
tables = genotype_metrics.find_all('table', {"class":"table table-striped table-hover"})
x = tables[1]
for row in x.find_all('tr'):
col_count = 1
columns = row.find_all('td')
for column in columns:
if col_count == 1:
field = column.get_text().strip()
field = schema_mapping.get(field, field)
else:
value = column.get_text().strip()
report_json[field] = value
col_count += 1
x = tables[0]
rows = x.find_all('tr')
headers = rows[0].find_all('th', scope='col')
headers = [h.text for h in headers]
for row in rows[1:]:
sample_id = row.find('th')
values = [sample_id] + row.find_all('td')
values = [v.text for v in values]
d = {'file': summary_file}
d.update(dict(zip(headers, values)))
genotypemetrics.append(d)
os.remove(summary_file)
print(summary_file + ' removed')
final_values = {}
extra_values = {}
for prop, value in report_json.items():
if prop in schema_props:
final_values[prop] = schemify(value, full_schema['properties'][prop]['type'])
else:
extra_values[prop] = value
in_schema[file] = final_values
out_schema[file] = extra_values
else:
sys.exit('ERROR: --pipeline not recognized, should be cellranger or dragen')
if genotypemetrics:
df = pd.DataFrame(genotypemetrics)
df.to_csv('genotype_metrics.tsv', sep='\t')
df = pd.DataFrame(in_schema)  # api: pandas.DataFrame
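# pandas.DataFrame applied to a dict of dicts (as with in_schema above) yields
# one column per outer key and one row per inner key; the keys below are made up.
def _demo_frame_from_nested_dict():
    import pandas as pd
    nested = {"dir_a": {"metric_x": "1"}, "dir_b": {"metric_x": "2"}}
    frame = pd.DataFrame(nested)
    assert list(frame.columns) == ["dir_a", "dir_b"]
    assert list(frame.index) == ["metric_x"]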
'''
Created on Nov 3 2017
Modified on Nov 13 2019
Modified on March 25 2020
@author1: <NAME>
@author2: <NAME>
'''
import pandas as pd
import numpy as np
import os
import configuration
def makedir():
'''
Create a subject's directory in target path from configuration.py
'''
base_path = configuration.COLLECT_DATA_PATH
i = 0
while True:
i += 1
path = '%s%d/' % (base_path, i,)
is_exists=os.path.exists(path)
if not is_exists:
os.makedirs(path)
break
return i, path
def make_trial_dir(path, trial_num):
'''
Create trial's directory for subjects
Arguments:
path: the target path
trial_num: how many trials this subject will do.
'''
for i in range(1, int(trial_num)+1):
os.makedirs(path + "/trial_" + str(i))
def creat_subject(name, age, gender, trial_num):
'''
Create a subject csv file for each subject when they fill their information
Arguments:
name: the subject's name in English, a string
age: the subject's age, a integer
gender: the subject's gender, female or male
trial_num: the number of trials in which every subject needs to participate,
ranges from 1 to 40
'''
subject_id, path = makedir()
label_frame = pd.DataFrame(
columns=['trail_id', 'valence', 'arousal']
)
label_frame.to_csv(path+'label.csv', index=False)
# information_frame = pd.DataFrame(
# np.array(
# [
# [subject_id, name, age, gender],
#
# ]
# ),
# columns=['subject_id', 'name', 'age', 'gender']
# )
print("subject_id:", subject_id, ", name:", name, " age:", age, " gender:", gender)
information_frame = pd.DataFrame( pd.read_csv("../dataset/collected_dataset/information.csv")  # api: pandas.read_csv
# -*- coding: utf-8 -*-
import sys, os
import pandas as pd
import openpyxl
from openpyxl.styles import PatternFill
import numpy as np
from collections import defaultdict
from scanner_map import searchKey, CertifiedManufacturerModelNameCTDict, CertifiedManufacturerCTDict, TrueManufacturerModelNameCTDict, TrueManufacturerCTDict
from scanner_map import ScannerType, CertifiedManufacturerModelNameICADict, CertifiedManufacturerICADict, TrueManufacturerModelNameICADict, TrueManufacturerICADict
from datetime import datetime
from openpyxl.utils import get_column_letter
from openpyxl.styles import Font, Color, Border, Side
from openpyxl.styles.differential import DifferentialStyle
from openpyxl.formatting import Rule
def highlight_columns(sheet, columns=[], color='A5A5A5', offset=2):
for col in columns:
cell = sheet.cell(1, col+offset)
cell.fill = PatternFill(start_color=color, end_color=color, fill_type = 'solid')
return sheet
def merge_defaultdicts(d,d1):
for k,v in d1.items():
if (k in d):
d[k].update(d1[k])
else:
d[k] = d1[k]
return d
def covertDate(date_str):
month_lookup = defaultdict(lambda: None, {'JAN':1, 'FEB':2, 'MAR':3, 'APR':4, 'MAY':5, 'JUN':6, 'JUL':7, 'AUG':8,'SEP':9, 'OCT':10,'NOV':11, 'DEC':12})
day = str(date_str[0:2])
# Zero-pad the month so '%Y%m%d' parses unambiguously: without padding, '23JAN2020'
# builds '2020123', which strptime reads as 2020-12-03 instead of 2020-01-23.
month = str(month_lookup[date_str[2:5]]).zfill(2)
year = date_str[5:9]
s = year + month + day
return datetime.strptime(s, '%Y%m%d')
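# A quick, hypothetical check of covertDate with the zero-padded month above:
# '23JAN2020' should now round-trip to 23 January 2020.
def _demo_covert_date():
    assert covertDate("23JAN2020") == datetime.strptime("20200123", "%Y%m%d")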
def checkModalities(modList0, modList1):
for m0 in modList0:
for m1 in modList1:
if m0==m1:
return True
return False
def splitScannerList(filepath_scanner):
#filepath_scanner = 'H:/cloud/cloud_data/Projects/CACSFilter/data/scanner/scanner_correspondence_V05_manual.xlsx'
df_scanner = pd.read_excel(filepath_scanner, 'linear', index_col=0)
df_missing_CT = pd.DataFrame(columns=df_scanner.columns)
df_missing_XA = pd.DataFrame(columns=df_scanner.columns)
df_missing = df_scanner[(df_scanner['ECRF_MISSING']==True) & (df_scanner['ITT']!=2)]
for index, row in df_missing.iterrows():
if 'DICOM XA' in row['ManualCheck']:
df_missing_XA = df_missing_XA.append(row)
if 'DICOM CT' in row['ManualCheck']:
df_missing_CT = df_missing_CT.append(row)
# Update CT sheet
writer = pd.ExcelWriter(filepath_scanner, engine="openpyxl", mode="a")
# Update CT sheet
sheet_name = 'ECRF_MISSING_CT'
workbook = writer.book
df_missing_CT.to_excel(writer, sheet_name=sheet_name, index=False)
sheet = workbook[sheet_name]
# Update XA sheet
sheet_name = 'ECRF_MISSING_XA'
workbook = writer.book
df_missing_XA.to_excel(writer, sheet_name=sheet_name, index=False)
sheet = workbook[sheet_name]
writer.save()
# Read discharge data
filepath_dicom = 'H:/cloud/cloud_data/Projects/CACSFilter/data/scanner/discharge_dicom_27082020_OT.xlsx'
filepath_ecrf_study = 'H:/cloud/cloud_data/Projects/CACSFilter/data/scanner/ecrf_study_20200827.xlsx'
filepath_scanner_old = 'H:/cloud/cloud_data/Projects/CACSFilter/data/scanner/scanner_correspondence_V04_manual.xlsx'
filepath_scanner = 'H:/cloud/cloud_data/Projects/CACSFilter/data/scanner/scanner_correspondence.xlsx'
df_dicom = pd.read_excel(filepath_dicom, 'linear', index_col=0)
#df_dicom=df_dicom[0:1000]
df_dicom.replace(to_replace=[np.nan], value='', inplace=True)
df_ecrf = pd.read_excel(filepath_ecrf_study, 'Tabelle1')
#df_ecrf=df_ecrf[0:1000]
df_scanner_old = pd.read_excel(filepath_scanner_old, 'linear', index_col=0)
df_scanner_old.replace(to_replace=[np.nan], value='', inplace=True)
columns_scanner_rename=['PatientID', 'Site', 'ITT', 'RD_MB', '1. Date of CT', 'Date of ICA scan',
'Date of ICA scan 2', 'Date of staged PCI 1', 'Date of staged PCI 2',
'Date of staged PCI 3', 'duplicate entry', 'FFR', 'MRI_visite',
'Date of Echo', 'Date of PET', 'Date of SPECT:', 'Date of FU_CT-scan',
'Date cec_ct', 'Date pet ct', 'Date ldct', 'ldct 3m', 'ldct 6m',
'ldct 12m', 'Date FU ICA scan']
columns_scanner=['PatientID', 'Site', 'ITT', 'RD_MB',
'1. Date of CT', '1. Date of CT StudyInstanceUID',
'Date of ICA scan', 'Date of ICA scan StudyInstanceUID',
'Date of ICA scan 2', 'Date of ICA scan 2 StudyInstanceUID',
'Date of staged PCI 1', 'Date of staged PCI 1 StudyInstanceUID',
'Date of staged PCI 2', 'Date of staged PCI 2 StudyInstanceUID',
'Date of staged PCI 3', 'Date of staged PCI 3 StudyInstanceUID',
'duplicate entry',
'FFR', 'FFR StudyInstanceUID',
'MRI_visite',
'Date of Echo', 'Date of Echo StudyInstanceUID',
'Date of PET', 'Date of PET StudyInstanceUID',
'Date of SPECT:', 'Date of SPECT: StudyInstanceUID',
'Date of FU_CT-scan', 'Date of FU_CT-scan StudyInstanceUID',
'Date cec_ct', 'Date cec_ct StudyInstanceUID',
'Date pet ct', 'Date pet ct StudyInstanceUID',
'Date ldct', 'Date ldct StudyInstanceUID',
'ldct 3m', 'ldct 3m StudyInstanceUID',
'ldct 6m', 'ldct 6m StudyInstanceUID',
'ldct 12m', 'ldct 12m StudyInstanceUID',
'Date FU ICA scan', 'Date FU ICA scan StudyInstanceUID']
columns_scanner_missing = [x for x in columns_scanner if x not in columns_scanner_rename]
#columns_result = ['OK', 'DICOM_MISSING', 'ECRF_MISSING', 'DICOM_ECRF_MISMATCH']
columns_result = ['DICOM_MISSING', 'ECRF_MISSING', 'ECRF_MISSING_SeriesInstanceUID']
columns_ecrf=['Patient identifier', 'Centre name (mnpctrname)', 'ITT', 'RD_MB', '1. Date of CT', 'Date of ICA scan',
'Date of ICA scan 2', 'Date of staged PCI 1', 'Date of staged PCI 2',
'Date of staged PCI 3', 'duplicate entry ', 'FFR', 'MRI_visite',
'Date of Echo', 'Date of PET', 'Date of SPECT:', 'Date of FU_CT-scan:',
'Date cec_ct', 'Date pet ct', 'Date ldct:', 'ldct 3m', 'ldct 6m',
'ldct 12m', 'Date FU ICA scan:']
dates_required = ['1. Date of CT', 'Date of ICA scan', 'Date of ICA scan 2', 'Date of staged PCI 1', 'Date of staged PCI 2',
'Date of staged PCI 3']
modalities_required = defaultdict(lambda: None, {'1. Date of CT': ['CT'], 'Date of ICA scan': ['XA'], 'Date of ICA scan 2': ['XA'],
'Date of staged PCI 1': ['XA'], 'Date of staged PCI 2': ['XA'], 'Date of staged PCI 3': ['XA']})
dates_sidestudy = ['FFR','Date of Echo', 'Date of PET', 'Date of SPECT:', 'Date of FU_CT-scan',
'Date cec_ct', 'Date pet ct', 'Date ldct', 'ldct 3m', 'ldct 6m','ldct 12m', 'Date FU ICA scan']
modalities_sidestudy = defaultdict(lambda: None, {'FFR': ['XA'], 'Date of Echo': ['US'], 'Date of PET': ['CT','PT'], 'Date of SPECT:': ['CT','NM'], 'Date of FU_CT-scan': ['CT'],
'Date cec_ct': ['CT'], 'Date pet ct': ['PT'], 'Date ldct': ['CT'], 'ldct 3m': ['CT'], 'ldct 6m': ['CT'],'ldct 12m': ['CT'],
'Date FU ICA scan': ['XA']})
dates_all = dates_required + dates_sidestudy
# f = 'H:/cloud/cloud_data/Projects/BIOQIC/08_Research/PACSServer/date.sas7bdat'
# f = 'C:/Users/bernifoellmer/Downloads/SASVisualForecasting_sampledatasets/skinproduct_vfdemo.sas7bdat'
# db = pd.read_sas(f)
# Create dataframe with patient per line
df_scanner = pd.DataFrame()
df_dicom_study = df_dicom.drop_duplicates(subset=['StudyInstanceUID'], ignore_index=True)
# Convert modalities into list of modalities
df_dicom_study.reset_index(drop=True,inplace=True)
for index, row in df_dicom_study.iterrows():
print(index)
#sys.exit()
df = df_dicom[df_dicom['StudyInstanceUID']==row['StudyInstanceUID']]
modList=list(set(list(df['Modality'])))
modList_str = ','.join(modList)
df_dicom_study.loc[index, 'Modality'] = modList_str
df_ecrf_study = df_ecrf.rename(columns = dict(zip(columns_ecrf, columns_scanner_rename)))
df_ecrf_study = df_ecrf_study[columns_scanner_rename]
# Convert date
for ecrf_date in dates_all:
for index, row in df_ecrf_study.iterrows():
date_str = df_ecrf_study.loc[index, ecrf_date]
#print('ecrf_date', ecrf_date)
#print('index', index)
#print('date_str', date_str)
if (type(date_str)==str) and (not date_str=='.'):
df_ecrf_study.loc[index, ecrf_date] = covertDate(date_str)
# date_str = df_ecrf_study.loc[277, 'FFR']
# d=covertDate(date_str)
colmax=[]
for index_ecrf, row_ecrf in df_ecrf_study.iterrows():
#sys.exit()
df_patient = df_dicom_study[df_dicom_study['PatientID']==row_ecrf['PatientID']]
df_patient.sort_values('StudyDate', inplace=True)
df_patient.reset_index(inplace=True)
print('index_ecrf:', index_ecrf)
s = row_ecrf[columns_scanner_rename]
for index, row in df_patient.iterrows():
# Filter wrong ManufacturerModelName and Manufacturer
TrueManufacturerList = [row['Manufacturer']]
TrueManufacturer_str = ','.join(list(set(TrueManufacturerList)))
TrueManufacturer = searchKey(TrueManufacturerCTDict, TrueManufacturer_str)
TrueManufacturerModelNameList = [row['ManufacturerModelName']]
TrueManufacturerModelName_str = ','.join(list(set(TrueManufacturerModelNameList)))
TrueManufacturerModelName = searchKey(TrueManufacturerModelNameCTDict, TrueManufacturerModelName_str)
s['StudyDate' + '_' + str(index).zfill(2)] = datetime.strptime(str(row['StudyDate']), '%Y%m%d')
s['StudyInstanceUID' + '_' + str(index).zfill(2)] = row['StudyInstanceUID']
s['Modality' + '_' + str(index).zfill(2)] = row['Modality']
s['TrueManufacturer' + '_' + str(index).zfill(2)] = TrueManufacturer
s['TrueManufacturerModelName' + '_' + str(index).zfill(2)] = TrueManufacturerModelName
s['EcrfFound' + '_' + str(index).zfill(2)] = ''
if len(s.keys())>len(colmax):
colmax = list(s.keys())
df_scanner = df_scanner.append(s, ignore_index=True, sort=True)
# Add columns_scanner_missing
# Reindex columns
# df_scanner = df_scanner[colmax]
# df_scanner['DICOM_MISSING']=False
# df_scanner['ECRF_MISSING']=False
#for x in columns_scanner_missing: df_scanner[x]=''
colmax2=colmax.copy()
for x in columns_scanner_missing:
if ' StudyInstanceUID' in x:
#sys.exit()
k = x[0:-17]
idx = colmax2.index(k)
colmax2 = colmax2[0:idx+1] + [x] + colmax2[idx+1:]
for x in columns_scanner_missing: df_scanner[x]=''
df_scanner = df_scanner[colmax2]
df_scanner['DICOM_MISSING']=False
df_scanner['ECRF_MISSING']=False
df_scanner['ManualCheck']=''
df_scanner['Solved']=''
df_scanner['EI']=''
df_scanner['MB']=''
df_scanner['BF']=''
colmax2 = colmax2 + ['DICOM_MISSING', 'ECRF_MISSING', 'ManualCheck', 'Solved', 'EI', 'MB', 'BF']
# Create color dataframe
df_scanner_color = df_scanner.copy()
# Check dates from ecrf
columns_study = [c for c in df_scanner.columns if 'StudyDate' in c]
columns_study_mod = [c for c in df_scanner.columns if 'Modality' in c]
columns_study_id = [c for c in df_scanner.columns if 'StudyInstanceUID_' in c]
columns_study_found= [c for c in df_scanner.columns if 'Found in ecrf' in c]
modalities_all = merge_defaultdicts(modalities_required , modalities_sidestudy)
for index, row in df_scanner.iterrows():
print('index', index)
#if index==103:
# sys.exit()
dates_dicom = list(row[columns_study])
mod_dicom = list(row[columns_study_mod])
for k in dates_all:
if(not pd.isnull(row[k])  # api: pandas.isnull
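# pandas.isnull, the masked call above, is True for NaN/None/NaT scalars and
# works elementwise on Series; the values below are illustrative only.
def _demo_isnull():
    import numpy as np
    import pandas as pd
    assert pd.isnull(np.nan) and pd.isnull(None) and pd.isnull(pd.NaT)
    assert pd.isnull(pd.Series([1.0, np.nan])).tolist() == [False, True]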
"""
Tests for CBMonthEnd CBMonthBegin, SemiMonthEnd, and SemiMonthBegin in offsets
"""
from datetime import (
date,
datetime,
)
import numpy as np
import pytest
from pandas._libs.tslibs import Timestamp
from pandas._libs.tslibs.offsets import (
CBMonthBegin,
CBMonthEnd,
CDay,
SemiMonthBegin,
SemiMonthEnd,
)
from pandas import (
DatetimeIndex,
Series,
_testing as tm,
date_range,
)
from pandas.tests.tseries.offsets.common import (
Base,
assert_is_on_offset,
assert_offset_equal,
)
from pandas.tests.tseries.offsets.test_offsets import _ApplyCases
from pandas.tseries import offsets as offsets
from pandas.tseries.holiday import USFederalHolidayCalendar
class CustomBusinessMonthBase:
def setup_method(self, method):
self.d = datetime(2008, 1, 1)
self.offset = self._offset()
self.offset1 = self.offset
self.offset2 = self._offset(2)
def test_eq(self):
assert self.offset2 == self.offset2
def test_mul(self):
pass
def test_hash(self):
assert hash(self.offset2) == hash(self.offset2)
def test_roundtrip_pickle(self):
def _check_roundtrip(obj):
unpickled = tm.round_trip_pickle(obj)
assert unpickled == obj
_check_roundtrip(self._offset())
_check_roundtrip(self._offset(2))
_check_roundtrip(self._offset() * 2)
def test_copy(self):
# GH 17452
off = self._offset(weekmask="Mon Wed Fri")
assert off == off.copy()
class TestCustomBusinessMonthEnd(CustomBusinessMonthBase, Base):
_offset = CBMonthEnd
def test_different_normalize_equals(self):
# GH#21404 changed __eq__ to return False when `normalize` does not match
offset = self._offset()
offset2 = self._offset(normalize=True)
assert offset != offset2
def test_repr(self):
assert repr(self.offset) == "<CustomBusinessMonthEnd>"
assert repr(self.offset2) == "<2 * CustomBusinessMonthEnds>"
def test_call(self):
with tm.assert_produces_warning(FutureWarning):
# GH#34171 DateOffset.__call__ is deprecated
assert self.offset2(self.d) == datetime(2008, 2, 29)
def testRollback1(self):
assert CDay(10).rollback(datetime(2007, 12, 31)) == datetime(2007, 12, 31)
def testRollback2(self):
assert CBMonthEnd(10).rollback(self.d) == datetime(2007, 12, 31)
def testRollforward1(self):
assert CBMonthEnd(10).rollforward(self.d) == datetime(2008, 1, 31)
def test_roll_date_object(self):
offset = CBMonthEnd()
dt = date(2012, 9, 15)
result = offset.rollback(dt)
assert result == datetime(2012, 8, 31)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 28)
offset = offsets.Day()
result = offset.rollback(dt)
assert result == datetime(2012, 9, 15)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 15)
on_offset_cases = [
(CBMonthEnd(), datetime(2008, 1, 31), True),
(CBMonthEnd(), datetime(2008, 1, 1), False),
]
@pytest.mark.parametrize("case", on_offset_cases)
def test_is_on_offset(self, case):
offset, d, expected = case
assert_is_on_offset(offset, d, expected)
apply_cases: _ApplyCases = [
(
CBMonthEnd(),
{
datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 2, 7): datetime(2008, 2, 29),
},
),
(
2 * CBMonthEnd(),
{
datetime(2008, 1, 1): datetime(2008, 2, 29),
datetime(2008, 2, 7): datetime(2008, 3, 31),
},
),
(
-CBMonthEnd(),
{
datetime(2008, 1, 1): datetime(2007, 12, 31),
datetime(2008, 2, 8): datetime(2008, 1, 31),
},
),
(
-2 * CBMonthEnd(),
{
datetime(2008, 1, 1): datetime(2007, 11, 30),
datetime(2008, 2, 9): datetime(2007, 12, 31),
},
),
(
CBMonthEnd(0),
{
datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 2, 7): datetime(2008, 2, 29),
},
),
]
@pytest.mark.parametrize("case", apply_cases)
def test_apply(self, case):
offset, cases = case
for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
def test_apply_large_n(self):
dt = datetime(2012, 10, 23)
result = dt + CBMonthEnd(10)
assert result == datetime(2013, 7, 31)
result = dt + CDay(100) - CDay(100)
assert result == dt
off = CBMonthEnd() * 6
rs = datetime(2012, 1, 1) - off
xp = datetime(2011, 7, 29)
assert rs == xp
st = datetime(2011, 12, 18)
rs = st + off
xp = datetime(2012, 5, 31)
assert rs == xp
def test_holidays(self):
# Define a TradingDay offset
holidays = ["2012-01-31", datetime(2012, 2, 28), np.datetime64("2012-02-29")]
bm_offset = CBMonthEnd(holidays=holidays)
dt = datetime(2012, 1, 1)
assert dt + bm_offset == datetime(2012, 1, 30)
assert dt + 2 * bm_offset == datetime(2012, 2, 27)
@pytest.mark.filterwarnings("ignore:Non:pandas.errors.PerformanceWarning")
def test_datetimeindex(self):
from pandas.tseries.holiday import USFederalHolidayCalendar
hcal = USFederalHolidayCalendar()
freq = CBMonthEnd(calendar=hcal)
assert date_range(start="20120101", end="20130101", freq=freq).tolist()[
0
] == datetime(2012, 1, 31)
class TestCustomBusinessMonthBegin(CustomBusinessMonthBase, Base):
_offset = CBMonthBegin
def test_different_normalize_equals(self):
# GH#21404 changed __eq__ to return False when `normalize` does not match
offset = self._offset()
offset2 = self._offset(normalize=True)
assert offset != offset2
def test_repr(self):
assert repr(self.offset) == "<CustomBusinessMonthBegin>"
assert repr(self.offset2) == "<2 * CustomBusinessMonthBegins>"
def test_call(self):
with tm.assert_produces_warning(FutureWarning):
# GH#34171 DateOffset.__call__ is deprecated
assert self.offset2(self.d) == datetime(2008, 3, 3)
def testRollback1(self):
assert CDay(10).rollback(datetime(2007, 12, 31)) == datetime(2007, 12, 31)
def testRollback2(self):
assert CBMonthBegin(10).rollback(self.d) == datetime(2008, 1, 1)
def testRollforward1(self):
assert CBMonthBegin(10).rollforward(self.d) == datetime(2008, 1, 1)
def test_roll_date_object(self):
offset = CBMonthBegin()
dt = date(2012, 9, 15)
result = offset.rollback(dt)
assert result == datetime(2012, 9, 3)
result = offset.rollforward(dt)
assert result == datetime(2012, 10, 1)
offset = offsets.Day()
result = offset.rollback(dt)
assert result == datetime(2012, 9, 15)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 15)
on_offset_cases = [
(CBMonthBegin(), datetime(2008, 1, 1), True),
(CBMonthBegin(), datetime(2008, 1, 31), False),
]
@pytest.mark.parametrize("case", on_offset_cases)
def test_is_on_offset(self, case):
offset, dt, expected = case
assert_is_on_offset(offset, dt, expected)
apply_cases: _ApplyCases = [
(
CBMonthBegin(),
{
datetime(2008, 1, 1): datetime(2008, 2, 1),
datetime(2008, 2, 7): datetime(2008, 3, 3),
},
),
(
2 * CBMonthBegin(),
{
datetime(2008, 1, 1): datetime(2008, 3, 3),
datetime(2008, 2, 7): datetime(2008, 4, 1),
},
),
(
-CBMonthBegin(),
{
datetime(2008, 1, 1): datetime(2007, 12, 3),
datetime(2008, 2, 8): datetime(2008, 2, 1),
},
),
(
-2 * CBMonthBegin(),
{
datetime(2008, 1, 1): datetime(2007, 11, 1),
datetime(2008, 2, 9): datetime(2008, 1, 1),
},
),
(
CBMonthBegin(0),
{
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 1, 7): datetime(2008, 2, 1),
},
),
]
@pytest.mark.parametrize("case", apply_cases)
def test_apply(self, case):
offset, cases = case
for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
def test_apply_large_n(self):
dt = datetime(2012, 10, 23)
result = dt + CBMonthBegin(10)
assert result == datetime(2013, 8, 1)
result = dt + CDay(100) - CDay(100)
assert result == dt
off = CBMonthBegin() * 6
rs = datetime(2012, 1, 1) - off
xp = datetime(2011, 7, 1)
assert rs == xp
st = datetime(2011, 12, 18)
rs = st + off
xp = datetime(2012, 6, 1)
assert rs == xp
def test_holidays(self):
# Define a TradingDay offset
holidays = ["2012-02-01", datetime(2012, 2, 2), np.datetime64("2012-03-01")]
bm_offset = CBMonthBegin(holidays=holidays)
dt = datetime(2012, 1, 1)
assert dt + bm_offset == datetime(2012, 1, 2)
assert dt + 2 * bm_offset == datetime(2012, 2, 3)
@pytest.mark.filterwarnings("ignore:Non:pandas.errors.PerformanceWarning")
def test_datetimeindex(self):
hcal = USFederalHolidayCalendar()
cbmb = CBMonthBegin(calendar=hcal)
assert date_range(start="20120101", end="20130101", freq=cbmb).tolist()[
0
] == datetime(2012, 1, 3)
class TestSemiMonthEnd(Base):
_offset = SemiMonthEnd
offset1 = _offset()
offset2 = _offset(2)
def test_offset_whole_year(self):
dates = (
datetime(2007, 12, 31),
datetime(2008, 1, 15),
datetime(2008, 1, 31),
datetime(2008, 2, 15),
datetime(2008, 2, 29),
datetime(2008, 3, 15),
datetime(2008, 3, 31),
datetime(2008, 4, 15),
datetime(2008, 4, 30),
datetime(2008, 5, 15),
datetime(2008, 5, 31),
datetime(2008, 6, 15),
datetime(2008, 6, 30),
datetime(2008, 7, 15),
datetime(2008, 7, 31),
datetime(2008, 8, 15),
datetime(2008, 8, 31),
datetime(2008, 9, 15),
datetime(2008, 9, 30),
datetime(2008, 10, 15),
datetime(2008, 10, 31),
datetime(2008, 11, 15),
datetime(2008, 11, 30),
datetime(2008, 12, 15),
datetime(2008, 12, 31),
)
for base, exp_date in zip(dates[:-1], dates[1:]):
assert_offset_equal(SemiMonthEnd(), base, exp_date)
# ensure .apply_index works as expected
s = DatetimeIndex(dates[:-1])
with tm.assert_produces_warning(None):
# GH#22535 check that we don't get a FutureWarning from adding
# an integer array to PeriodIndex
result = SemiMonthEnd() + s
exp = DatetimeIndex(dates[1:])
tm.assert_index_equal(result, exp)
# ensure generating a range with DatetimeIndex gives same result
result = date_range(start=dates[0], end=dates[-1], freq="SM")
exp = DatetimeIndex(dates, freq="SM")
tm.assert_index_equal(result, exp)
offset_cases = []
offset_cases.append(
(
SemiMonthEnd(),
{
datetime(2008, 1, 1): datetime(2008, 1, 15),
datetime(2008, 1, 15): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 2, 15),
datetime(2006, 12, 14): datetime(2006, 12, 15),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2007, 1, 15),
datetime(2007, 1, 1): datetime(2007, 1, 15),
datetime(2006, 12, 1): datetime(2006, 12, 15),
datetime(2006, 12, 15): datetime(2006, 12, 31),
},
)
)
offset_cases.append(
(
SemiMonthEnd(day_of_month=20),
{
datetime(2008, 1, 1): datetime(2008, 1, 20),
datetime(2008, 1, 15): datetime(2008, 1, 20),
datetime(2008, 1, 21): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 2, 20),
datetime(2006, 12, 14): datetime(2006, 12, 20),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2007, 1, 20),
datetime(2007, 1, 1): datetime(2007, 1, 20),
datetime(2006, 12, 1): datetime(2006, 12, 20),
datetime(2006, 12, 15): datetime(2006, 12, 20),
},
)
)
offset_cases.append(
(
SemiMonthEnd(0),
{
datetime(2008, 1, 1): datetime(2008, 1, 15),
datetime(2008, 1, 16): datetime(2008, 1, 31),
datetime(2008, 1, 15): datetime(2008, 1, 15),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2006, 12, 31),
datetime(2007, 1, 1): datetime(2007, 1, 15),
},
)
)
offset_cases.append(
(
SemiMonthEnd(0, day_of_month=16),
{
datetime(2008, 1, 1): datetime(2008, 1, 16),
datetime(2008, 1, 16): datetime(2008, 1, 16),
datetime(2008, 1, 15): datetime(2008, 1, 16),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2006, 12, 31),
datetime(2007, 1, 1): datetime(2007, 1, 16),
},
)
)
offset_cases.append(
(
SemiMonthEnd(2),
{
datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 2, 29),
datetime(2006, 12, 29): datetime(2007, 1, 15),
datetime(2006, 12, 31): datetime(2007, 1, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31),
datetime(2007, 1, 16): datetime(2007, 2, 15),
datetime(2006, 11, 1): datetime(2006, 11, 30),
},
)
)
offset_cases.append(
(
SemiMonthEnd(-1),
{
datetime(2007, 1, 1): datetime(2006, 12, 31),
datetime(2008, 6, 30): datetime(2008, 6, 15),
datetime(2008, 12, 31): datetime(2008, 12, 15),
datetime(2006, 12, 29): datetime(2006, 12, 15),
datetime(2006, 12, 30): datetime(2006, 12, 15),
datetime(2007, 1, 1): datetime(2006, 12, 31),
},
)
)
offset_cases.append(
(
SemiMonthEnd(-1, day_of_month=4),
{
datetime(2007, 1, 1): datetime(2006, 12, 31),
datetime(2007, 1, 4): datetime(2006, 12, 31),
datetime(2008, 6, 30): datetime(2008, 6, 4),
datetime(2008, 12, 31): datetime(2008, 12, 4),
datetime(2006, 12, 5): datetime(2006, 12, 4),
datetime(2006, 12, 30): datetime(2006, 12, 4),
datetime(2007, 1, 1): datetime(2006, 12, 31),
},
)
)
offset_cases.append(
(
SemiMonthEnd(-2),
{
datetime(2007, 1, 1): datetime(2006, 12, 15),
datetime(2008, 6, 30): datetime(2008, 5, 31),
datetime(2008, 3, 15): datetime(2008, 2, 15),
datetime(2008, 12, 31): datetime(2008, 11, 30),
datetime(2006, 12, 29): datetime(2006, 11, 30),
datetime(2006, 12, 14): datetime(2006, 11, 15),
datetime(2007, 1, 1): datetime(2006, 12, 15),
},
)
)
@pytest.mark.parametrize("case", offset_cases)
def test_offset(self, case):
offset, cases = case
for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
@pytest.mark.parametrize("case", offset_cases)
def test_apply_index(self, case):
# https://github.com/pandas-dev/pandas/issues/34580
offset, cases = case
s = DatetimeIndex(cases.keys())
exp = DatetimeIndex(cases.values())
with tm.assert_produces_warning(None):
# GH#22535 check that we don't get a FutureWarning from adding
# an integer array to PeriodIndex
result = offset + s
tm.assert_index_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = offset.apply_index(s)
tm.assert_index_equal(result, exp)
on_offset_cases = [
(datetime(2007, 12, 31), True),
(datetime(2007, 12, 15), True),
(datetime(2007, 12, 14), False),
(datetime(2007, 12, 1), False),
(datetime(2008, 2, 29), True),
]
@pytest.mark.parametrize("case", on_offset_cases)
def test_is_on_offset(self, case):
dt, expected = case
assert_is_on_offset(SemiMonthEnd(), dt, expected)
@pytest.mark.parametrize("klass", [Series, DatetimeIndex])
def test_vectorized_offset_addition(self, klass):
s = klass(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
Timestamp("2000-02-15", tz="US/Central"),
],
name="a",
)
with tm.assert_produces_warning(None):
# GH#22535 check that we don't get a FutureWarning from adding
# an integer array to PeriodIndex
result = s + SemiMonthEnd()
result2 = SemiMonthEnd() + s
exp = klass(
[
Timestamp("2000-01-31 00:15:00", tz="US/Central"),
Timestamp("2000-02-29", tz="US/Central"),
],
name="a",
)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
s = klass(
[
Timestamp("2000-01-01 00:15:00", tz="US/Central"),
Timestamp("2000-02-01", tz="US/Central"),
],
name="a",
)
with tm.assert_produces_warning(None):
# GH#22535 check that we don't get a FutureWarning from adding
# an integer array to PeriodIndex
result = s + SemiMonthEnd()
result2 = SemiMonthEnd() + s
exp = klass(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
Timestamp("2000-02-15", tz="US/Central"),
],
name="a",
)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
class TestSemiMonthBegin(Base):
_offset = SemiMonthBegin
offset1 = _offset()
offset2 = _offset(2)
def test_offset_whole_year(self):
dates = (
datetime(2007, 12, 15),
datetime(2008, 1, 1),
datetime(2008, 1, 15),
datetime(2008, 2, 1),
datetime(2008, 2, 15),
datetime(2008, 3, 1),
datetime(2008, 3, 15),
datetime(2008, 4, 1),
datetime(2008, 4, 15),
datetime(2008, 5, 1),
datetime(2008, 5, 15),
datetime(2008, 6, 1),
datetime(2008, 6, 15),
datetime(2008, 7, 1),
datetime(2008, 7, 15),
datetime(2008, 8, 1),
datetime(2008, 8, 15),
datetime(2008, 9, 1),
datetime(2008, 9, 15),
datetime(2008, 10, 1),
datetime(2008, 10, 15),
datetime(2008, 11, 1),
datetime(2008, 11, 15),
datetime(2008, 12, 1),
datetime(2008, 12, 15),
)
for base, exp_date in zip(dates[:-1], dates[1:]):
assert_offset_equal(SemiMonthBegin(), base, exp_date)
# ensure .apply_index works as expected
s = DatetimeIndex(dates[:-1])
with tm.assert_produces_warning(None):
# GH#22535 check that we don't get a FutureWarning from adding
# an integer array to PeriodIndex
result = SemiMonthBegin() + s
exp = DatetimeIndex(dates[1:])
tm.assert_index_equal(result, exp)
# ensure generating a range with DatetimeIndex gives same result
result = date_range(start=dates[0], end=dates[-1], freq="SMS")
exp = DatetimeIndex(dates, freq="SMS")
tm.assert_index_equal(result, exp)  # api: pandas._testing.assert_index_equal
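# pandas._testing.assert_index_equal, the masked call above, returns None when
# the two indexes match and raises AssertionError otherwise (module path as of
# pandas >= 1.0; older releases exposed it under pandas.util.testing).
def _demo_assert_index_equal():
    import pandas as pd
    import pandas._testing as tm
    tm.assert_index_equal(pd.Index([1, 2, 3]), pd.Index([1, 2, 3]))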
#!python
##################################################
# ACCESS QC Module
# Innovation Laboratory
# Center For Molecular Oncology
# Memorial Sloan Kettering Cancer Research Center
# maintainer: <NAME> (<EMAIL>)
#
#
# This module functions as an aggregation step to combine QC metrics
# across Waltz runs on different bam types.
import shutil
import logging
import argparse
import numpy as np
import pandas as pd
from python_tools.constants import *
from python_tools.util import to_csv
def unique_or_tot(x):
if TOTAL_LABEL in x:
return TOTAL_LABEL
else:
return PICARD_LABEL
def get_read_counts_table(path, pool):
"""
This method is only used to generate stats for un-collapsed bams
"""
read_counts_path = os.path.join(path, AGBM_READ_COUNTS_FILENAME)
read_counts = pd.read_csv(read_counts_path, sep='\t')
# Melt our DF to get all values of the on target rate and duplicate rates as values
read_counts = pd.melt(read_counts, id_vars=[SAMPLE_ID_COLUMN], var_name='Category')
# We only want the read counts-related row values
read_counts = read_counts[~read_counts['Category'].isin(['bam', TOTAL_READS_COLUMN, UNMAPPED_READS_COLUMN, 'duplicate_fraction'])]
read_counts['method'] = read_counts['Category'].apply(unique_or_tot)
read_counts['pool'] = pool
# read_counts = read_counts.reset_index(drop=True)
return read_counts
def get_read_counts_total_table(path, pool):
"""
This table is used for "Fraction of Total Reads that Align to the Human Genome" plot
"""
full_path = os.path.join(path, AGBM_READ_COUNTS_FILENAME)
read_counts_total = pd.read_csv(full_path, sep='\t')
col_idx = ~read_counts_total.columns.str.contains(PICARD_LABEL)
read_counts_total = read_counts_total.iloc[:, col_idx]
read_counts_total['AlignFrac'] = read_counts_total[TOTAL_MAPPED_COLUMN] / read_counts_total[TOTAL_READS_COLUMN]
read_counts_total[TOTAL_OFF_TARGET_FRACTION_COLUMN] = 1 - read_counts_total[TOTAL_ON_TARGET_FRACTION_COLUMN]
read_counts_total['pool'] = pool
return read_counts_total
def get_coverage_table(path, pool):
"""
Coverage table
"""
full_path = os.path.join(path, AGBM_COVERAGE_FILENAME)
coverage_table = pd.read_csv(full_path, sep='\t')
coverage_table = pd.melt(coverage_table, id_vars=SAMPLE_ID_COLUMN, var_name='method', value_name='average_coverage')
coverage_table['method'] = coverage_table['method'].str.replace('average_coverage_', '')
coverage_table['pool'] = pool
return coverage_table
def get_collapsed_waltz_tables(path, method, pool):
"""
Creates read_counts, coverage, and gc_bias tables for collapsed bam metrics.
"""
read_counts_table_path = os.path.join(path, AGBM_READ_COUNTS_FILENAME)
read_counts_table = pd.read_csv(read_counts_table_path, sep='\t')  # api: pandas.read_csv
from __future__ import print_function
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pandas import (Series, Index, Int64Index, Timestamp, Period,
DatetimeIndex, PeriodIndex, TimedeltaIndex,
Timedelta, timedelta_range, date_range, Float64Index,
_np_version_under1p10)
import pandas.tslib as tslib
import pandas.tseries.period as period
import pandas.util.testing as tm
from pandas.tests.test_base import Ops
class TestDatetimeIndexOps(Ops):
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific']
def setUp(self):
super(TestDatetimeIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['date', 'time', 'microsecond', 'nanosecond',
'is_month_start', 'is_month_end',
'is_quarter_start',
'is_quarter_end', 'is_year_start',
'is_year_end', 'weekday_name'],
lambda x: isinstance(x, DatetimeIndex))
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year', 'day', 'second', 'weekday']:
self.assertRaises(TypeError, lambda x: getattr(self.dt_series, op))
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
self.assertEqual(s.year, 2000)
self.assertEqual(s.month, 1)
self.assertEqual(s.day, 10)
self.assertRaises(AttributeError, lambda: s.weekday)
def test_asobject_tolist(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [Timestamp('2013-01-31'),
Timestamp('2013-02-28'),
Timestamp('2013-03-31'),
Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx', tz='Asia/Tokyo')
expected_list = [Timestamp('2013-01-31', tz='Asia/Tokyo'),
Timestamp('2013-02-28', tz='Asia/Tokyo'),
Timestamp('2013-03-31', tz='Asia/Tokyo'),
Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [Timestamp('2013-01-01'),
Timestamp('2013-01-02'), pd.NaT,
Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
for tz in self.tz:
# monotonic
idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], tz=tz)
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timestamp('2011-01-01', tz=tz))
self.assertEqual(idx.max(), Timestamp('2011-01-03', tz=tz))
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = DatetimeIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(dr),
Timestamp('2016-01-15 00:00:00', freq='D'))
self.assertEqual(np.max(dr),
Timestamp('2016-01-20 00:00:00', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, dr, out=0)
self.assertEqual(np.argmin(dr), 0)
self.assertEqual(np.argmax(dr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, dr, out=0)
def test_round(self):
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=5,
freq='30Min', tz=tz)
elt = rng[1]
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 01:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(rng.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with tm.assertRaisesRegexp(ValueError, msg):
rng.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, rng.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_repeat_range(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
for tz in self.tz:
index = pd.date_range('2001-01-01', periods=2, freq='D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.date_range('2001-01-01', periods=2, freq='2D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.DatetimeIndex(['2001-01-01', 'NaT', '2003-01-01'],
tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-01',
'NaT', 'NaT', 'NaT',
'2003-01-01', '2003-01-01', '2003-01-01'],
tz=tz)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_repeat(self):
reps = 2
msg = "the 'axis' parameter is not supported"
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=2,
freq='30Min', tz=tz)
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
])
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
self.assertIsNone(res.freq)
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
tm.assertRaisesRegexp(ValueError, msg, np.repeat,
rng, reps, axis=1)
def test_representation(self):
idx = []
idx.append(DatetimeIndex([], freq='D'))
idx.append(DatetimeIndex(['2011-01-01'], freq='D'))
idx.append(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'
], freq='H', tz='Asia/Tokyo'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='UTC'))
exp = []
exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""")
exp.append("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', "
"freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+09:00', "
"'2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00']"
", dtype='datetime64[ns, Asia/Tokyo]', freq='H')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00-05:00', "
"'2011-01-01 10:00:00-05:00', 'NaT'], "
"dtype='datetime64[ns, US/Eastern]', freq=None)")
        exp.append("DatetimeIndex(['2011-01-01 09:00:00+00:00', "
                   "'2011-01-01 10:00:00+00:00', 'NaT'], "
                   "dtype='datetime64[ns, UTC]', freq=None)")
with pd.option_context('display.width', 300):
for indx, expected in zip(idx, exp):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(indx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15'])
exp1 = """Series([], dtype: datetime64[ns])"""
exp2 = """0 2011-01-01
dtype: datetime64[ns]"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: datetime64[ns]"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: datetime64[ns]"""
exp5 = """0 2011-01-01 09:00:00+09:00
1 2011-01-01 10:00:00+09:00
2 2011-01-01 11:00:00+09:00
dtype: datetime64[ns, Asia/Tokyo]"""
exp6 = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 NaT
dtype: datetime64[ns, US/Eastern]"""
exp7 = """0 2011-01-01 09:00:00
1 2011-01-02 10:15:00
dtype: datetime64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4,
idx5, idx6, idx7],
[exp1, exp2, exp3, exp4,
exp5, exp6, exp7]):
result = repr(Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
exp1 = """DatetimeIndex: 0 entries
Freq: D"""
exp2 = """DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = ("DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 "
"to 2011-01-01 11:00:00+09:00\n"
"Freq: H")
exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
[exp1, exp2, exp3, exp4, exp5, exp6]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T',
'S', 'L', 'U'],
['day', 'day', 'day', 'day', 'hour',
'minute', 'second', 'millisecond',
'microsecond']):
for tz in self.tz:
idx = pd.date_range(start='2013-04-01', periods=30, freq=freq,
tz=tz)
self.assertEqual(idx.resolution, expected)
def test_union(self):
for tz in self.tz:
# union
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=10, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=8, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_add_iadd(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng + 1
expected = pd.date_range('2000-01-01 10:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add a datelike to a DatetimeIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx + Timestamp('2011-01-01')
with tm.assertRaisesRegexp(TypeError, msg):
Timestamp('2011-01-01') + idx
def test_add_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now raises
# TypeError (GH14164)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
with tm.assertRaises(TypeError):
dti + dti
with tm.assertRaises(TypeError):
dti_tz + dti_tz
with tm.assertRaises(TypeError):
dti_tz + dti
with tm.assertRaises(TypeError):
dti + dti_tz
def test_difference(self):
for tz in self.tz:
# diff
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=3, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_diff = rng.difference(other)
tm.assert_index_equal(result_diff, expected)
def test_sub_isub(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
result = rng - delta
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng - 1
expected = pd.date_range('2000-01-01 08:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
# return subtraction -> TimeDeltaIndex (GH ...)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
dti_tz2 = date_range('20130101', periods=3).tz_localize('UTC')
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
with tm.assertRaises(TypeError):
dti_tz - dti
with tm.assertRaises(TypeError):
dti - dti_tz
with tm.assertRaises(TypeError):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range('20130101', periods=3)
dti2 = date_range('20130101', periods=4)
with tm.assertRaises(ValueError):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(['2012-01-01', np.nan, '2012-01-03'])
dti2 = DatetimeIndex(['2012-01-02', '2012-01-03', np.nan])
expected = TimedeltaIndex(['1 days', np.nan, np.nan])
result = dti2 - dti1
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'D']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
            with tm.assertRaises(TypeError):
                p - idx
#!/usr/bin/env python
"""Generate Tableau data from pisa 2012 database."""
import pandas as pd
def return_time_category(value):
"""Return category of time value."""
category = {
1: 'Once or twice a month',
2: 'Once or twice a week',
3: 'Almost every day',
4: 'Every day',
0: 'Never or hardly ever'
}
    if not pd.isnull(value):
from datetime import datetime, time, timedelta
from pandas.compat import range
import sys
import os
import nose
import numpy as np
from pandas import Index, DatetimeIndex, Timestamp, Series, date_range, period_range
import pandas.tseries.frequencies as frequencies
from pandas.tseries.tools import to_datetime
import pandas.tseries.offsets as offsets
from pandas.tseries.period import PeriodIndex
import pandas.compat as compat
from pandas.compat import is_platform_windows
import pandas.util.testing as tm
from pandas import Timedelta
def test_to_offset_multiple():
freqstr = '2h30min'
freqstr2 = '2h 30min'
result = frequencies.to_offset(freqstr)
assert(result == frequencies.to_offset(freqstr2))
expected = offsets.Minute(150)
assert(result == expected)
freqstr = '2h30min15s'
result = frequencies.to_offset(freqstr)
expected = offsets.Second(150 * 60 + 15)
assert(result == expected)
freqstr = '2h 60min'
result = frequencies.to_offset(freqstr)
expected = offsets.Hour(3)
assert(result == expected)
freqstr = '15l500u'
result = frequencies.to_offset(freqstr)
expected = offsets.Micro(15500)
assert(result == expected)
freqstr = '10s75L'
result = frequencies.to_offset(freqstr)
expected = offsets.Milli(10075)
assert(result == expected)
freqstr = '2800N'
result = frequencies.to_offset(freqstr)
expected = offsets.Nano(2800)
assert(result == expected)
# malformed
try:
frequencies.to_offset('2h20m')
except ValueError:
pass
else:
assert(False)
def test_to_offset_negative():
freqstr = '-1S'
result = frequencies.to_offset(freqstr)
assert(result.n == -1)
freqstr = '-5min10s'
result = frequencies.to_offset(freqstr)
assert(result.n == -310)
def test_to_offset_leading_zero():
freqstr = '00H 00T 01S'
result = frequencies.to_offset(freqstr)
assert(result.n == 1)
freqstr = '-00H 03T 14S'
result = frequencies.to_offset(freqstr)
assert(result.n == -194)
def test_to_offset_pd_timedelta():
# Tests for #9064
td = Timedelta(days=1, seconds=1)
result = frequencies.to_offset(td)
expected = offsets.Second(86401)
assert(expected==result)
td = Timedelta(days=-1, seconds=1)
result = frequencies.to_offset(td)
expected = offsets.Second(-86399)
assert(expected==result)
td = Timedelta(hours=1, minutes=10)
result = frequencies.to_offset(td)
expected = offsets.Minute(70)
assert(expected==result)
td = Timedelta(hours=1, minutes=-10)
result = frequencies.to_offset(td)
expected = offsets.Minute(50)
assert(expected==result)
td = Timedelta(weeks=1)
result = frequencies.to_offset(td)
expected = offsets.Day(7)
assert(expected==result)
td1 = Timedelta(hours=1)
result1 = frequencies.to_offset(td1)
result2 = frequencies.to_offset('60min')
assert(result1 == result2)
td = Timedelta(microseconds=1)
result = frequencies.to_offset(td)
expected = offsets.Micro(1)
assert(expected == result)
td = Timedelta(microseconds=0)
tm.assertRaises(ValueError, lambda: frequencies.to_offset(td))
def test_anchored_shortcuts():
result = frequencies.to_offset('W')
expected = frequencies.to_offset('W-SUN')
assert(result == expected)
result1 = frequencies.to_offset('Q')
result2 = frequencies.to_offset('Q-DEC')
expected = offsets.QuarterEnd(startingMonth=12)
assert(result1 == expected)
assert(result2 == expected)
result1 = frequencies.to_offset('Q-MAY')
expected = offsets.QuarterEnd(startingMonth=5)
assert(result1 == expected)
def test_get_rule_month():
result = frequencies._get_rule_month('W')
assert(result == 'DEC')
result = frequencies._get_rule_month(offsets.Week())
assert(result == 'DEC')
result = frequencies._get_rule_month('D')
assert(result == 'DEC')
result = frequencies._get_rule_month(offsets.Day())
assert(result == 'DEC')
result = frequencies._get_rule_month('Q')
assert(result == 'DEC')
result = frequencies._get_rule_month(offsets.QuarterEnd(startingMonth=12))
    assert(result == 'DEC')
result = frequencies._get_rule_month('Q-JAN')
assert(result == 'JAN')
result = frequencies._get_rule_month(offsets.QuarterEnd(startingMonth=1))
assert(result == 'JAN')
result = frequencies._get_rule_month('A-DEC')
assert(result == 'DEC')
result = frequencies._get_rule_month(offsets.YearEnd())
assert(result == 'DEC')
result = frequencies._get_rule_month('A-MAY')
assert(result == 'MAY')
result = frequencies._get_rule_month(offsets.YearEnd(month=5))
assert(result == 'MAY')
class TestFrequencyCode(tm.TestCase):
def test_freq_code(self):
self.assertEqual(frequencies.get_freq('A'), 1000)
self.assertEqual(frequencies.get_freq('3A'), 1000)
self.assertEqual(frequencies.get_freq('-1A'), 1000)
self.assertEqual(frequencies.get_freq('W'), 4000)
self.assertEqual(frequencies.get_freq('W-MON'), 4001)
self.assertEqual(frequencies.get_freq('W-FRI'), 4005)
for freqstr, code in compat.iteritems(frequencies._period_code_map):
result = frequencies.get_freq(freqstr)
self.assertEqual(result, code)
result = frequencies.get_freq_group(freqstr)
self.assertEqual(result, code // 1000 * 1000)
result = frequencies.get_freq_group(code)
self.assertEqual(result, code // 1000 * 1000)
def test_freq_group(self):
self.assertEqual(frequencies.get_freq_group('A'), 1000)
self.assertEqual(frequencies.get_freq_group('3A'), 1000)
self.assertEqual(frequencies.get_freq_group('-1A'), 1000)
self.assertEqual(frequencies.get_freq_group('A-JAN'), 1000)
self.assertEqual(frequencies.get_freq_group('A-MAY'), 1000)
self.assertEqual(frequencies.get_freq_group(offsets.YearEnd()), 1000)
self.assertEqual(frequencies.get_freq_group(offsets.YearEnd(month=1)), 1000)
self.assertEqual(frequencies.get_freq_group(offsets.YearEnd(month=5)), 1000)
self.assertEqual(frequencies.get_freq_group('W'), 4000)
self.assertEqual(frequencies.get_freq_group('W-MON'), 4000)
self.assertEqual(frequencies.get_freq_group('W-FRI'), 4000)
self.assertEqual(frequencies.get_freq_group(offsets.Week()), 4000)
self.assertEqual(frequencies.get_freq_group(offsets.Week(weekday=1)), 4000)
self.assertEqual(frequencies.get_freq_group(offsets.Week(weekday=5)), 4000)
def test_get_to_timestamp_base(self):
tsb = frequencies.get_to_timestamp_base
self.assertEqual(tsb(frequencies.get_freq_code('D')[0]),
frequencies.get_freq_code('D')[0])
self.assertEqual(tsb(frequencies.get_freq_code('W')[0]),
frequencies.get_freq_code('D')[0])
self.assertEqual(tsb(frequencies.get_freq_code('M')[0]),
frequencies.get_freq_code('D')[0])
self.assertEqual(tsb(frequencies.get_freq_code('S')[0]),
frequencies.get_freq_code('S')[0])
self.assertEqual(tsb(frequencies.get_freq_code('T')[0]),
frequencies.get_freq_code('S')[0])
self.assertEqual(tsb(frequencies.get_freq_code('H')[0]),
frequencies.get_freq_code('S')[0])
def test_freq_to_reso(self):
Reso = frequencies.Resolution
self.assertEqual(Reso.get_str_from_freq('A'), 'year')
self.assertEqual(Reso.get_str_from_freq('Q'), 'quarter')
self.assertEqual(Reso.get_str_from_freq('M'), 'month')
self.assertEqual(Reso.get_str_from_freq('D'), 'day')
self.assertEqual(Reso.get_str_from_freq('H'), 'hour')
self.assertEqual(Reso.get_str_from_freq('T'), 'minute')
self.assertEqual(Reso.get_str_from_freq('S'), 'second')
self.assertEqual(Reso.get_str_from_freq('L'), 'millisecond')
self.assertEqual(Reso.get_str_from_freq('U'), 'microsecond')
self.assertEqual(Reso.get_str_from_freq('N'), 'nanosecond')
for freq in ['A', 'Q', 'M', 'D', 'H', 'T', 'S', 'L', 'U', 'N']:
# check roundtrip
result = Reso.get_freq(Reso.get_str_from_freq(freq))
self.assertEqual(freq, result)
for freq in ['D', 'H', 'T', 'S', 'L', 'U']:
result = Reso.get_freq(Reso.get_str(Reso.get_reso_from_freq(freq)))
self.assertEqual(freq, result)
def test_get_freq_code(self):
# freqstr
self.assertEqual(frequencies.get_freq_code('A'),
(frequencies.get_freq('A'), 1))
self.assertEqual(frequencies.get_freq_code('3D'),
(frequencies.get_freq('D'), 3))
self.assertEqual(frequencies.get_freq_code('-2M'),
(frequencies.get_freq('M'), -2))
# tuple
self.assertEqual(frequencies.get_freq_code(('D', 1)),
(frequencies.get_freq('D'), 1))
self.assertEqual(frequencies.get_freq_code(('A', 3)),
(frequencies.get_freq('A'), 3))
self.assertEqual(frequencies.get_freq_code(('M', -2)),
(frequencies.get_freq('M'), -2))
# numeric tuple
self.assertEqual(frequencies.get_freq_code((1000, 1)), (1000, 1))
# offsets
self.assertEqual(frequencies.get_freq_code(offsets.Day()),
(frequencies.get_freq('D'), 1))
self.assertEqual(frequencies.get_freq_code(offsets.Day(3)),
(frequencies.get_freq('D'), 3))
self.assertEqual(frequencies.get_freq_code(offsets.Day(-2)),
(frequencies.get_freq('D'), -2))
self.assertEqual(frequencies.get_freq_code(offsets.MonthEnd()),
(frequencies.get_freq('M'), 1))
self.assertEqual(frequencies.get_freq_code(offsets.MonthEnd(3)),
(frequencies.get_freq('M'), 3))
self.assertEqual(frequencies.get_freq_code(offsets.MonthEnd(-2)),
(frequencies.get_freq('M'), -2))
self.assertEqual(frequencies.get_freq_code(offsets.Week()),
(frequencies.get_freq('W'), 1))
self.assertEqual(frequencies.get_freq_code(offsets.Week(3)),
(frequencies.get_freq('W'), 3))
self.assertEqual(frequencies.get_freq_code(offsets.Week(-2)),
(frequencies.get_freq('W'), -2))
# monday is weekday=0
self.assertEqual(frequencies.get_freq_code(offsets.Week(weekday=1)),
(frequencies.get_freq('W-TUE'), 1))
self.assertEqual(frequencies.get_freq_code(offsets.Week(3, weekday=0)),
(frequencies.get_freq('W-MON'), 3))
self.assertEqual(frequencies.get_freq_code(offsets.Week(-2, weekday=4)),
(frequencies.get_freq('W-FRI'), -2))
_dti = DatetimeIndex
class TestFrequencyInference(tm.TestCase):
def test_raise_if_period_index(self):
index = PeriodIndex(start="1/1/1990", periods=20, freq="M")
self.assertRaises(TypeError, frequencies.infer_freq, index)
def test_raise_if_too_few(self):
index = _dti(['12/31/1998', '1/3/1999'])
self.assertRaises(ValueError, frequencies.infer_freq, index)
def test_business_daily(self):
index = _dti(['12/31/1998', '1/3/1999', '1/4/1999'])
self.assertEqual(frequencies.infer_freq(index), 'B')
def test_day(self):
self._check_tick(timedelta(1), 'D')
def test_day_corner(self):
index = _dti(['1/1/2000', '1/2/2000', '1/3/2000'])
self.assertEqual(frequencies.infer_freq(index), 'D')
def test_non_datetimeindex(self):
dates = to_datetime(['1/1/2000', '1/2/2000', '1/3/2000'])
self.assertEqual(frequencies.infer_freq(dates), 'D')
def test_hour(self):
self._check_tick(timedelta(hours=1), 'H')
def test_minute(self):
self._check_tick(timedelta(minutes=1), 'T')
def test_second(self):
self._check_tick(timedelta(seconds=1), 'S')
def test_millisecond(self):
self._check_tick(timedelta(microseconds=1000), 'L')
def test_microsecond(self):
self._check_tick(timedelta(microseconds=1), 'U')
def test_nanosecond(self):
self._check_tick(np.timedelta64(1, 'ns'), 'N')
def _check_tick(self, base_delta, code):
b = Timestamp(datetime.now())
for i in range(1, 5):
inc = base_delta * i
index = _dti([b + inc * j for j in range(3)])
if i > 1:
exp_freq = '%d%s' % (i, code)
else:
exp_freq = code
self.assertEqual(frequencies.infer_freq(index), exp_freq)
index = _dti([b + base_delta * 7] +
[b + base_delta * j for j in range(3)])
self.assertIsNone(frequencies.infer_freq(index))
index = _dti([b + base_delta * j for j in range(3)] +
[b + base_delta * 7])
self.assertIsNone(frequencies.infer_freq(index))
def test_weekly(self):
days = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN']
for day in days:
self._check_generated_range('1/1/2000', 'W-%s' % day)
def test_week_of_month(self):
days = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN']
for day in days:
for i in range(1, 5):
self._check_generated_range('1/1/2000', 'WOM-%d%s' % (i, day))
def test_fifth_week_of_month(self):
# Only supports freq up to WOM-4. See #9425
func = lambda: date_range('2014-01-01', freq='WOM-5MON')
self.assertRaises(ValueError, func)
def test_fifth_week_of_month_infer(self):
# Only attempts to infer up to WOM-4. See #9425
index = DatetimeIndex(["2014-03-31", "2014-06-30", "2015-03-30"])
assert frequencies.infer_freq(index) is None
def test_week_of_month_fake(self):
#All of these dates are on same day of week and are 4 or 5 weeks apart
index = DatetimeIndex(["2013-08-27","2013-10-01","2013-10-29","2013-11-26"])
assert frequencies.infer_freq(index) != 'WOM-4TUE'
def test_monthly(self):
self._check_generated_range('1/1/2000', 'M')
def test_monthly_ambiguous(self):
rng = _dti(['1/31/2000', '2/29/2000', '3/31/2000'])
self.assertEqual(rng.inferred_freq, 'M')
def test_business_monthly(self):
self._check_generated_range('1/1/2000', 'BM')
def test_business_start_monthly(self):
self._check_generated_range('1/1/2000', 'BMS')
def test_quarterly(self):
for month in ['JAN', 'FEB', 'MAR']:
self._check_generated_range('1/1/2000', 'Q-%s' % month)
def test_annual(self):
for month in MONTHS:
self._check_generated_range('1/1/2000', 'A-%s' % month)
def test_business_annual(self):
for month in MONTHS:
self._check_generated_range('1/1/2000', 'BA-%s' % month)
def test_annual_ambiguous(self):
rng = _dti(['1/31/2000', '1/31/2001', '1/31/2002'])
self.assertEqual(rng.inferred_freq, 'A-JAN')
def _check_generated_range(self, start, freq):
freq = freq.upper()
gen = date_range(start, periods=7, freq=freq)
index = _dti(gen.values)
if not freq.startswith('Q-'):
self.assertEqual(frequencies.infer_freq(index), gen.freqstr)
else:
inf_freq = frequencies.infer_freq(index)
self.assertTrue((inf_freq == 'Q-DEC' and
gen.freqstr in ('Q', 'Q-DEC', 'Q-SEP', 'Q-JUN',
'Q-MAR'))
or
(inf_freq == 'Q-NOV' and
gen.freqstr in ('Q-NOV', 'Q-AUG', 'Q-MAY', 'Q-FEB'))
or
(inf_freq == 'Q-OCT' and
gen.freqstr in ('Q-OCT', 'Q-JUL', 'Q-APR', 'Q-JAN')))
gen = date_range(start, periods=5, freq=freq)
index = _dti(gen.values)
if not freq.startswith('Q-'):
self.assertEqual(frequencies.infer_freq(index), gen.freqstr)
else:
inf_freq = frequencies.infer_freq(index)
self.assertTrue((inf_freq == 'Q-DEC' and
gen.freqstr in ('Q', 'Q-DEC', 'Q-SEP', 'Q-JUN',
'Q-MAR'))
or
(inf_freq == 'Q-NOV' and
gen.freqstr in ('Q-NOV', 'Q-AUG', 'Q-MAY', 'Q-FEB'))
or
(inf_freq == 'Q-OCT' and
gen.freqstr in ('Q-OCT', 'Q-JUL', 'Q-APR', 'Q-JAN')))
def test_infer_freq(self):
rng = period_range('1959Q2', '2009Q3', freq='Q')
rng = Index(rng.to_timestamp('D', how='e').asobject)
self.assertEqual(rng.inferred_freq, 'Q-DEC')
rng = period_range('1959Q2', '2009Q3', freq='Q-NOV')
rng = Index(rng.to_timestamp('D', how='e').asobject)
self.assertEqual(rng.inferred_freq, 'Q-NOV')
rng = period_range('1959Q2', '2009Q3', freq='Q-OCT')
rng = Index(rng.to_timestamp('D', how='e').asobject)
self.assertEqual(rng.inferred_freq, 'Q-OCT')
def test_infer_freq_tz(self):
freqs = {'AS-JAN': ['2009-01-01', '2010-01-01', '2011-01-01', '2012-01-01'],
'Q-OCT': ['2009-01-31', '2009-04-30', '2009-07-31', '2009-10-31'],
'M': ['2010-11-30', '2010-12-31', '2011-01-31', '2011-02-28'],
'W-SAT': ['2010-12-25', '2011-01-01', '2011-01-08', '2011-01-15'],
'D': ['2011-01-01', '2011-01-02', '2011-01-03', '2011-01-04'],
'H': ['2011-12-31 22:00', '2011-12-31 23:00', '2012-01-01 00:00', '2012-01-01 01:00']
}
# GH 7310
for tz in [None, 'Australia/Sydney', 'Asia/Tokyo', 'Europe/Paris',
'US/Pacific', 'US/Eastern']:
for expected, dates in compat.iteritems(freqs):
idx = DatetimeIndex(dates, tz=tz)
self.assertEqual(idx.inferred_freq, expected)
def test_infer_freq_tz_transition(self):
# Tests for #8772
date_pairs = [['2013-11-02', '2013-11-5'], #Fall DST
['2014-03-08', '2014-03-11'], #Spring DST
['2014-01-01', '2014-01-03']] #Regular Time
freqs = ['3H', '10T', '3601S', '3600001L', '3600000001U', '3600000000001N']
for tz in [None, 'Australia/Sydney', 'Asia/Tokyo', 'Europe/Paris',
'US/Pacific', 'US/Eastern']:
for date_pair in date_pairs:
for freq in freqs:
idx = date_range(date_pair[0], date_pair[1], freq=freq, tz=tz)
self.assertEqual(idx.inferred_freq, freq)
index = date_range("2013-11-03", periods=5, freq="3H").tz_localize("America/Chicago")
self.assertIsNone(index.inferred_freq)
def test_infer_freq_businesshour(self):
# GH 7905
idx = DatetimeIndex(['2014-07-01 09:00', '2014-07-01 10:00', '2014-07-01 11:00',
'2014-07-01 12:00', '2014-07-01 13:00', '2014-07-01 14:00'])
# hourly freq in a day must result in 'H'
self.assertEqual(idx.inferred_freq, 'H')
idx = DatetimeIndex(['2014-07-01 09:00', '2014-07-01 10:00', '2014-07-01 11:00',
'2014-07-01 12:00', '2014-07-01 13:00', '2014-07-01 14:00',
'2014-07-01 15:00', '2014-07-01 16:00',
'2014-07-02 09:00', '2014-07-02 10:00', '2014-07-02 11:00'])
self.assertEqual(idx.inferred_freq, 'BH')
idx = DatetimeIndex(['2014-07-04 09:00', '2014-07-04 10:00', '2014-07-04 11:00',
'2014-07-04 12:00', '2014-07-04 13:00', '2014-07-04 14:00',
'2014-07-04 15:00', '2014-07-04 16:00',
'2014-07-07 09:00', '2014-07-07 10:00', '2014-07-07 11:00'])
self.assertEqual(idx.inferred_freq, 'BH')
idx = DatetimeIndex(['2014-07-04 09:00', '2014-07-04 10:00', '2014-07-04 11:00',
'2014-07-04 12:00', '2014-07-04 13:00', '2014-07-04 14:00',
'2014-07-04 15:00', '2014-07-04 16:00',
'2014-07-07 09:00', '2014-07-07 10:00', '2014-07-07 11:00',
'2014-07-07 12:00', '2014-07-07 13:00', '2014-07-07 14:00',
'2014-07-07 15:00', '2014-07-07 16:00',
'2014-07-08 09:00', '2014-07-08 10:00', '2014-07-08 11:00',
'2014-07-08 12:00', '2014-07-08 13:00', '2014-07-08 14:00',
'2014-07-08 15:00', '2014-07-08 16:00'])
self.assertEqual(idx.inferred_freq, 'BH')
def test_not_monotonic(self):
rng = _dti(['1/31/2000', '1/31/2001', '1/31/2002'])
rng = rng[::-1]
self.assertEqual(rng.inferred_freq, '-1A-JAN')
    def test_non_datetimeindex_pydatetime(self):
rng = _dti(['1/31/2000', '1/31/2001', '1/31/2002'])
vals = rng.to_pydatetime()
result = frequencies.infer_freq(vals)
self.assertEqual(result, rng.inferred_freq)
def test_invalid_index_types(self):
# test all index types
for i in [ tm.makeIntIndex(10),
tm.makeFloatIndex(10),
tm.makePeriodIndex(10) ]:
self.assertRaises(TypeError, lambda : frequencies.infer_freq(i))
# GH 10822
# odd error message on conversions to datetime for unicode
if not is_platform_windows():
for i in [ tm.makeStringIndex(10),
tm.makeUnicodeIndex(10) ]:
self.assertRaises(ValueError, lambda : frequencies.infer_freq(i))
def test_string_datetimelike_compat(self):
# GH 6463
expected = frequencies.infer_freq(['2004-01', '2004-02', '2004-03', '2004-04'])
result = frequencies.infer_freq(Index(['2004-01', '2004-02', '2004-03', '2004-04']))
self.assertEqual(result,expected)
def test_series(self):
# GH6407
# inferring series
# invalid type of Series
for s in [ Series(np.arange(10)),
Series(np.arange(10.))]:
self.assertRaises(TypeError, lambda : frequencies.infer_freq(s))
# a non-convertible string
self.assertRaises(ValueError, lambda : frequencies.infer_freq(Series(['foo','bar'])))
# cannot infer on PeriodIndex
for freq in [None, 'L']:
s = Series(period_range('2013',periods=10,freq=freq))
self.assertRaises(TypeError, lambda : frequencies.infer_freq(s))
for freq in ['Y']:
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
s = Series(period_range('2013',periods=10,freq=freq))
self.assertRaises(TypeError, lambda : frequencies.infer_freq(s))
# DateTimeIndex
for freq in ['M', 'L', 'S']:
s = Series(date_range('20130101',periods=10,freq=freq))
inferred = frequencies.infer_freq(s)
self.assertEqual(inferred,freq)
s = Series(date_range('20130101','20130110'))
inferred = frequencies.infer_freq(s)
self.assertEqual(inferred,'D')
def test_legacy_offset_warnings(self):
for k, v in compat.iteritems(frequencies._rule_aliases):
with tm.assert_produces_warning(FutureWarning):
result = frequencies.get_offset(k)
exp = frequencies.get_offset(v)
self.assertEqual(result, exp)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
idx = date_range('2011-01-01', periods=5, freq=k)
exp = date_range('2011-01-01', periods=5, freq=v)
self.assert_index_equal(idx, exp)
MONTHS = ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL', 'AUG', 'SEP',
'OCT', 'NOV', 'DEC']
def test_is_superperiod_subperiod():
assert(frequencies.is_superperiod(offsets.YearEnd(), offsets.MonthEnd()))
assert(frequencies.is_subperiod(offsets.MonthEnd(), offsets.YearEnd()))
assert(frequencies.is_superperiod(offsets.Hour(), offsets.Minute()))
    assert(frequencies.is_subperiod(offsets.Minute(), offsets.Hour()))
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.pyplot import rc
from sklearn.metrics import auc
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import Perceptron
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from xgboost import XGBClassifier
import sys
import time
# TeX fonts
rc('text', usetex=True)
rc('font',**{'family':'serif','serif':['Times']})
class GSClassification():
'''
Executes grid search and cross-validation for many classification models.
Parameters:
models: list of potential classifiers
grid: grid search parameters
'''
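    # Usage sketch (illustrative only; the grid keys and parameter values below are
    # invented for the example and are not taken from this project's configuration):
    #
    #   grid = {'SVC()': [{'C': [1, 10], 'kernel': ['rbf']}],
    #           'RandomForestClassifier()': [{'n_estimators': [100, 200]}]}
    #   gs = GSClassification(models=['SVC()', 'RandomForestClassifier()'], grid=grid)
    #   gs.apply_grid_search(X_train, y_train, k=5)
    #   gs.show_dataframe()
    #
    # Note that the grid keys must match str(estimator_instance), e.g. 'SVC()', because
    # that is how apply_grid_search pairs estimators with their parameter grids.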
def __init__(self, models, grid):
self.models = models
# instances only desired models.
self.grid_of_params = {k:v for k, v in grid.items() if k in self.models}
    def apply_grid_search(self, X_train, y_train, k=5):
        '''
        Parameters:
        X_train: 2D ndarray
        y_train: 1D ndarray
        k: cross-validation k-fold. Default: 5.
        '''
        self.X_train = X_train
        self.y_train = y_train
# list of current compatible classifiers
        compatible_classes = [
            SVC(), DecisionTreeClassifier(), KNeighborsClassifier(),
            LogisticRegression(), GaussianNB(), RandomForestClassifier(),
            SGDClassifier(), Perceptron(),
        ]
        compatible_classes_str = [str(i) for i in compatible_classes
                                  if str(i) in self.grid_of_params.keys()]
        self.classificators = [compatible_classes[i].fit(X_train, y_train)
                               for i in range(len(compatible_classes))
                               if str(compatible_classes[i]) in self.grid_of_params.keys()]
self.model_name = []
self.accuracies = []
self.standar_dev = []
self.best_parameters = []
self.best_estimators = []
for i in range(len(self.classificators)):
start = time.time()
print("Executing grid search for {}.".format(compatible_classes_str[i]))
grid_search = GridSearchCV(estimator = self.classificators[i],
param_grid = self.grid_of_params[compatible_classes_str[i]],
scoring = 'accuracy',
cv = k,
n_jobs = -1,
verbose=1)
grid_search.fit(X_train, y_train)
self.accuracies.append(grid_search.best_score_)
self.best_parameters.append(grid_search.best_params_)
self.best_estimators.append(grid_search.best_estimator_)
self.standar_dev.append(grid_search.cv_results_['std_test_score'][grid_search.best_index_])
self.model_name.append(compatible_classes_str[i][0:-2])
end = time.time()
print ("Elapsed time: %.3fs"%(end-start))
# XGboost is special...
if 'XGBClassifier()' in self.grid_of_params.keys():
start = time.time()
xgb = XGBClassifier()
print("Executing grid search for XGBClassifier().")
grid_search = GridSearchCV(estimator = xgb,
param_grid = self.grid_of_params['XGBClassifier()'],
scoring = 'accuracy',
cv = k,
n_jobs = -1,
verbose=1)
grid_search.fit(X_train, y_train)
self.accuracies.append(grid_search.best_score_)
self.best_parameters.append(grid_search.best_params_)
self.standar_dev.append(grid_search.cv_results_['std_test_score'][grid_search.best_index_])
self.model_name.append('XGBClassifier')
end = time.time()
print ("Elapsed time: %.3fs"%(end-start))
xgb.fit(X_train, y_train)
self.classificators.append(xgb)
self.best_estimators.append(grid_search.best_estimator_)
def show_dataframe(self):
        out = list(zip(self.model_name, self.accuracies, self.standar_dev))  # zip pairs same-index entries from the three lists into tuples
        resultsinDataFrame = pd.DataFrame(out, columns = ['method', 'mean accuracy (%)', 'standard deviation (%)'])
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
import numpy as np
import pandas as pd
import pytest
import choicemodels
from choicemodels.tools import MergedChoiceTable
@pytest.fixture
def obs():
d1 = {'oid': [0,1],
'obsval': [6,8],
'choice': [1,2]}
return pd.DataFrame(d1).set_index('oid')
@pytest.fixture
def alts():
d2 = {'aid': [0,1,2],
'altval': [10,20,30],
'w': [1,1,100]}
    return pd.DataFrame(d2)
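# The tests these fixtures serve are cut off in this excerpt; judging by the import
# above, the obs/alts frames feed choicemodels.tools.MergedChoiceTable, with 'choice'
# marking each observation's chosen alternative and 'w' acting as a sampling weight.
# The exact constructor arguments are not shown here, so check the choicemodels
# documentation rather than relying on this note.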
import pkg_resources
from unittest.mock import sentinel
import pandas as pd
import pytest
import osmo_jupyter.dataset.combine as module
@pytest.fixture
def test_picolog_file_path():
return pkg_resources.resource_filename(
"osmo_jupyter", "test_fixtures/test_picolog.csv"
)
@pytest.fixture
def test_calibration_file_path():
return pkg_resources.resource_filename(
"osmo_jupyter", "test_fixtures/test_calibration_log.csv"
)
class TestOpenAndCombineSensorData:
def test_interpolates_data_correctly(
self, test_calibration_file_path, test_picolog_file_path
):
combined_data = module.open_and_combine_picolog_and_calibration_data(
calibration_log_filepaths=[test_calibration_file_path],
picolog_log_filepaths=[test_picolog_file_path],
).reset_index() # move timestamp index to a column
# calibration log has 23 columns, but we only need to check that picolog data is interpolated correctly
subset_combined_data_to_compare = combined_data[
[
"timestamp",
"equilibration status",
"setpoint temperature (C)",
"PicoLog temperature (C)",
]
]
expected_interpolation = pd.DataFrame(
[
{
"timestamp": "2019-01-01 00:00:00",
"equilibration status": "waiting",
"setpoint temperature (C)": 40,
"PicoLog temperature (C)": 39,
},
{
"timestamp": "2019-01-01 00:00:01",
"equilibration status": "equilibrated",
"setpoint temperature (C)": 40,
"PicoLog temperature (C)": 39.5,
},
{
"timestamp": "2019-01-01 00:00:03",
"equilibration status": "equilibrated",
"setpoint temperature (C)": 40,
"PicoLog temperature (C)": 40,
},
{
"timestamp": "2019-01-01 00:00:04",
"equilibration status": "waiting",
"setpoint temperature (C)": 40,
"PicoLog temperature (C)": 40,
},
]
).astype(
subset_combined_data_to_compare.dtypes
) # coerce datatypes to match
pd.testing.assert_frame_equal(
subset_combined_data_to_compare, expected_interpolation
)
class TestGetEquilibrationBoundaries:
@pytest.mark.parametrize(
"input_equilibration_status, expected_boundaries",
[
(
{ # Use full timestamps to show that it works at second resolution
pd.to_datetime("2019-01-01 00:00:00"): "waiting",
pd.to_datetime("2019-01-01 00:00:01"): "equilibrated",
pd.to_datetime("2019-01-01 00:00:02"): "equilibrated",
pd.to_datetime("2019-01-01 00:00:03"): "waiting",
},
[
{
"start_time": pd.to_datetime("2019-01-01 00:00:01"),
"end_time": pd.to_datetime("2019-01-01 00:00:02"),
}
],
),
(
{ # Switch to using only years as the timestamp for terseness and readability
pd.to_datetime("2019"): "waiting",
pd.to_datetime("2020"): "equilibrated",
pd.to_datetime("2021"): "waiting",
},
[
{
"start_time": pd.to_datetime("2020"),
"end_time": pd.to_datetime("2020"),
}
],
),
(
{
pd.to_datetime("2020"): "equilibrated",
pd.to_datetime("2021"): "waiting",
pd.to_datetime("2022"): "equilibrated",
pd.to_datetime("2023"): "waiting",
},
[
{
"start_time": pd.to_datetime("2020"),
"end_time": pd.to_datetime("2020"),
},
{
"start_time": pd.to_datetime("2022"),
"end_time": pd.to_datetime("2022"),
},
],
),
(
{
pd.to_datetime("2019"): "waiting",
pd.to_datetime("2020"): "equilibrated",
pd.to_datetime("2021"): "waiting",
pd.to_datetime("2022"): "equilibrated",
},
[
{
"start_time": pd.to_datetime("2020"),
"end_time": pd.to_datetime("2020"),
},
{
"start_time": pd.to_datetime("2022"),
"end_time": pd.to_datetime("2022"),
},
],
),
(
{
pd.to_datetime("2019"): "waiting",
pd.to_datetime("2020"): "equilibrated",
pd.to_datetime("2021"): "waiting",
pd.to_datetime("2022"): "equilibrated",
pd.to_datetime("2023"): "waiting",
},
[
{
"start_time": pd.to_datetime("2020"),
"end_time": pd.to_datetime("2020"),
},
{
"start_time": pd.to_datetime("2022"),
"end_time": pd.to_datetime("2022"),
},
],
),
(
{
                    pd.to_datetime("2019")
import pandas as pd
from .datastore import merge_postcodes
from .types import ErrorDefinition
from .utils import add_col_to_tables_CONTINUOUSLY_LOOKED_AFTER as add_CLA_column # Check 'Episodes' present before use!
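# Note: add_CLA_column (aliased above) attaches a boolean CONTINUOUSLY_LOOKED_AFTER
# column derived from the Episodes table; several rules below rely on that column,
# which is why they return early when 'Episodes' is missing from the input dict.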
def validate_165():
error = ErrorDefinition(
code = '165',
description = 'Data entry for mother status is invalid.',
affected_fields = ['MOTHER', 'SEX', 'ACTIV', 'ACCOM', 'IN_TOUCH', 'DECOM']
)
def _validate(dfs):
if 'Header' not in dfs or 'Episodes' not in dfs or 'OC3' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
oc3 = dfs['OC3']
collection_start = dfs['metadata']['collection_start']
collection_end = dfs['metadata']['collection_end']
valid_values = ['0','1']
# prepare to merge
oc3.reset_index(inplace=True)
header.reset_index(inplace=True)
episodes.reset_index(inplace=True)
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes['EPS'] = (episodes['DECOM']>=collection_start) & (episodes['DECOM']<=collection_end)
episodes['EPS_COUNT'] = episodes.groupby('CHILD')['EPS'].transform('sum')
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er']).merge(oc3, on='CHILD', how='left')
# Raise error if provided <MOTHER> is not a valid value.
value_validity = merged['MOTHER'].notna() & (~merged['MOTHER'].isin(valid_values))
# If not provided
female = (merged['SEX']=='1')
eps_in_year = (merged['EPS_COUNT']>0)
none_provided = (merged['ACTIV'].isna()& merged['ACCOM'].isna()& merged['IN_TOUCH'].isna())
# If provided <MOTHER> must be a valid value. If not provided <MOTHER> then either <GENDER> is male or no episode record for current year and any of <IN_TOUCH>, <ACTIV> or <ACCOM> have been provided
mask = value_validity | (merged['MOTHER'].isna() & (female & (eps_in_year | none_provided)))
# That is, if value not provided and child is a female with eps in current year or no values of IN_TOUCH, ACTIV and ACCOM, then raise error.
error_locs_eps = merged.loc[mask, 'index_eps']
error_locs_header = merged.loc[mask, 'index_er']
error_locs_oc3 = merged.loc[mask, 'index']
return {'Header':error_locs_header.dropna().unique().tolist(),
'OC3':error_locs_oc3.dropna().unique().tolist()}
return error, _validate
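# Usage sketch (illustrative): each validate_* factory returns an ErrorDefinition plus
# a checker that takes a dict of DataFrames keyed by table name and returns the failing
# row indices per table, e.g.
#
#   error, check = validate_165()
#   failures = check({'Header': header_df, 'Episodes': episodes_df, 'OC3': oc3_df,
#                     'metadata': {'collection_start': '01/04/2020',
#                                  'collection_end': '31/03/2021'}})
#
# 'failures' is then a dict such as {'Header': [...], 'OC3': [...]} listing the row
# indices that break the rule; the dataframe variables above are placeholders.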
def validate_1014():
error = ErrorDefinition(
code='1014',
description='UASC information is not required for care leavers',
affected_fields=['ACTIV', 'ACCOM', 'IN_TOUCH', 'DECOM']
)
def _validate(dfs):
if 'UASC' not in dfs or 'Episodes' not in dfs or 'OC3' not in dfs:
return {}
else:
uasc = dfs['UASC']
episodes = dfs['Episodes']
oc3 = dfs['OC3']
collection_start = dfs['metadata']['collection_start']
collection_end = dfs['metadata']['collection_end']
# prepare to merge
oc3.reset_index(inplace=True)
uasc.reset_index(inplace=True)
episodes.reset_index(inplace=True)
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
date_check = (
((episodes['DECOM'] >= collection_start) & (episodes['DECOM'] <= collection_end))
| ((episodes['DEC'] >= collection_start) & (episodes['DEC'] <= collection_end))
| ((episodes['DECOM'] <= collection_start) & episodes['DEC'].isna())
)
episodes['EPS'] = date_check
episodes['EPS_COUNT'] = episodes.groupby('CHILD')['EPS'].transform('sum')
# inner merge to take only episodes of children which are also found on the uasc table
merged = episodes.merge(uasc, on='CHILD', how='inner', suffixes=['_eps', '_sc']).merge(oc3, on='CHILD',
how='left')
# adding suffixes with the secondary merge here does not go so well yet.
some_provided = (merged['ACTIV'].notna() | merged['ACCOM'].notna() | merged['IN_TOUCH'].notna())
mask = (merged['EPS_COUNT'] == 0) & some_provided
error_locs_uasc = merged.loc[mask, 'index_sc']
error_locs_oc3 = merged.loc[mask, 'index']
return {'UASC': error_locs_uasc.unique().tolist(), 'OC3': error_locs_oc3.unique().tolist()}
return error, _validate
# !# not sure what this rule is actually supposed to be getting at - description is confusing
def validate_197B():
error = ErrorDefinition(
code='197B',
description="SDQ score or reason for no SDQ should be reported for 4- or 17-year-olds.",
affected_fields=['SDQ_REASON', 'DOB'],
)
def _validate(dfs):
if 'OC2' not in dfs or 'Episodes' not in dfs:
return {}
oc2 = add_CLA_column(dfs, 'OC2')
start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
endo = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce')
oc2['DOB'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
ERRRR = (
(
(oc2['DOB'] + pd.DateOffset(years=4) == start) # ???
| (oc2['DOB'] + pd.DateOffset(years=17) == start)
)
& oc2['CONTINUOUSLY_LOOKED_AFTER']
& oc2['SDQ_SCORE'].isna()
& oc2['SDQ_REASON'].isna()
)
return {'OC2': oc2[ERRRR].index.to_list()}
return error, _validate
def validate_157():
error = ErrorDefinition(
code='157',
description="Child is aged 4 years or over at the beginning of the year or 16 years or under at the end of the "
"year and Strengths and Difficulties Questionnaire (SDQ) 1 has been recorded as the reason for no "
"Strengths and Difficulties Questionnaire (SDQ) score.",
affected_fields=['SDQ_REASON', 'DOB'],
)
def _validate(dfs):
if 'OC2' not in dfs or 'Episodes' not in dfs:
return {}
oc2 = add_CLA_column(dfs, 'OC2')
start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
endo = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce')
oc2['DOB'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
ERRRR = (
oc2['CONTINUOUSLY_LOOKED_AFTER']
& (oc2['DOB'] + pd.DateOffset(years=4) <= start)
& (oc2['DOB'] + pd.DateOffset(years=16) >= endo)
& oc2['SDQ_SCORE'].isna()
& (oc2['SDQ_REASON'] == 'SDQ1')
)
return {'OC2': oc2[ERRRR].index.to_list()}
return error, _validate
def validate_357():
error = ErrorDefinition(
code='357',
description='If this is the first episode ever for this child, reason for new episode must be S. '
'Check whether there is an episode immediately preceding this one, which has been left out. '
'If not the reason for new episode code must be amended to S.',
affected_fields=['RNE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
eps = dfs['Episodes']
eps['DECOM'] = pd.to_datetime(eps['DECOM'], format='%d/%m/%Y', errors='coerce')
eps = eps.loc[eps['DECOM'].notnull()]
first_eps = eps.loc[eps.groupby('CHILD')['DECOM'].idxmin()]
errs = first_eps[first_eps['RNE'] != 'S'].index.to_list()
return {'Episodes': errs}
return error, _validate
def validate_117():
error = ErrorDefinition(
code='117',
description='Date of decision that a child should/should no longer be placed for adoption is beyond the current collection year or after the child ceased to be looked after.',
affected_fields=['DATE_PLACED_CEASED', 'DATE_PLACED', 'DEC', 'REC', 'DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'PlacedAdoption' not in dfs:
return {}
else:
episodes = dfs['Episodes']
placed_adoption = dfs['PlacedAdoption']
collection_end = dfs['metadata']['collection_end']
# datetime
placed_adoption['DATE_PLACED_CEASED'] = pd.to_datetime(placed_adoption['DATE_PLACED_CEASED'],
format='%d/%m/%Y', errors='coerce')
placed_adoption['DATE_PLACED'] = pd.to_datetime(placed_adoption['DATE_PLACED'], format='%d/%m/%Y',
errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
# Drop nans and continuing episodes
episodes = episodes.dropna(subset=['DECOM'])
episodes = episodes[episodes['REC'] != 'X1']
episodes = episodes.loc[episodes.groupby('CHILD')['DECOM'].idxmax()]
# prepare to merge
placed_adoption.reset_index(inplace=True)
episodes.reset_index(inplace=True)
p4a_cols = ['DATE_PLACED', 'DATE_PLACED_CEASED']
# latest episodes
merged = episodes.merge(placed_adoption, on='CHILD', how='left', suffixes=['_eps', '_pa'])
mask = (
(merged['DATE_PLACED'] > collection_end)
| (merged['DATE_PLACED'] > merged['DEC'])
| (merged['DATE_PLACED_CEASED'] > collection_end)
| (merged['DATE_PLACED_CEASED'] > merged['DEC'])
)
# If provided <DATE_PLACED> and/or <DATE_PLACED_CEASED> must not be > <COLLECTION_END_DATE> or <DEC> of latest episode where <REC> not = 'X1'
pa_error_locs = merged.loc[mask, 'index_pa']
eps_error_locs = merged.loc[mask, 'index_eps']
return {'Episodes': eps_error_locs.tolist(), 'PlacedAdoption': pa_error_locs.unique().tolist()}
return error, _validate
def validate_118():
error = ErrorDefinition(
code='118',
description='Date of decision that a child should no longer be placed for adoption is before the current collection year or before the date the child started to be looked after.',
affected_fields=['DECOM', 'DECOM', 'LS']
)
def _validate(dfs):
if ('PlacedAdoption' not in dfs) or ('Episodes' not in dfs):
return {}
else:
placed_adoption = dfs['PlacedAdoption']
episodes = dfs['Episodes']
collection_start = dfs['metadata']['collection_start']
code_list = ['V3', 'V4']
# datetime
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
placed_adoption['DATE_PLACED_CEASED'] = pd.to_datetime(placed_adoption['DATE_PLACED_CEASED'],
format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
# <DECOM> of the earliest episode with an <LS> not = 'V3' or 'V4'
filter_by_ls = episodes[~(episodes['LS'].isin(code_list))]
earliest_episode_idxs = filter_by_ls.groupby('CHILD')['DECOM'].idxmin()
earliest_episodes = episodes[episodes.index.isin(earliest_episode_idxs)]
# prepare to merge
placed_adoption.reset_index(inplace=True)
earliest_episodes.reset_index(inplace=True)
# merge
merged = earliest_episodes.merge(placed_adoption, on='CHILD', how='left', suffixes=['_eps', '_pa'])
# drop rows where DATE_PLACED_CEASED is not provided
merged = merged.dropna(subset=['DATE_PLACED_CEASED'])
# If provided <DATE_PLACED_CEASED> must not be prior to <COLLECTION_START_DATE> or <DECOM> of the earliest episode with an <LS> not = 'V3' or 'V4'
mask = (merged['DATE_PLACED_CEASED'] < merged['DECOM']) | (merged['DATE_PLACED_CEASED'] < collection_start)
# error locations
pa_error_locs = merged.loc[mask, 'index_pa']
eps_error_locs = merged.loc[mask, 'index_eps']
return {'Episodes': eps_error_locs.tolist(), 'PlacedAdoption': pa_error_locs.unique().tolist()}
return error, _validate
def validate_352():
error = ErrorDefinition(
code='352',
description='Child who started to be looked after was aged 18 or over.',
affected_fields=['DECOM', 'RNE'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
if 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
header['DOB18'] = header['DOB'] + pd.DateOffset(years=18)
episodes_merged = episodes.reset_index().merge(header, how='left', on=['CHILD'], suffixes=('', '_header'),
indicator=True).set_index('index')
care_start = episodes_merged['RNE'].str.upper().astype(str).isin(['S'])
started_over_18 = episodes_merged['DOB18'] <= episodes_merged['DECOM']
error_mask = care_start & started_over_18
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_209():
error = ErrorDefinition(
code='209',
description='Child looked after is of school age and should not have an unknown Unique Pupil Number (UPN) code of UN1.',
affected_fields=['UPN', 'DOB']
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
else:
header = dfs['Header']
collection_start = dfs['metadata']['collection_start']
# convert to datetime
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
yr = collection_start.year - 1
reference_date = pd.to_datetime('31/08/' + str(yr), format='%d/%m/%Y', errors='coerce')
# If <DOB> >= 4 years prior to 31/08/YYYY then <UPN> should not be 'UN1' Note: YYYY in this instance refers to the year prior to the collection start (for collection year 2019-2020, it would be looking at the 31/08/2018).
mask = (reference_date >= (header['DOB'] + pd.offsets.DateOffset(years=4))) & (header['UPN'] == 'UN1')
# error locations
error_locs_header = header.index[mask]
return {'Header': error_locs_header.tolist()}
return error, _validate
def validate_198():
error = ErrorDefinition(
code='198',
description="Child has not been looked after continuously for at least 12 months at 31 March but a reason "
"for no Strengths and Difficulties (SDQ) score has been completed. ",
affected_fields=['SDQ_REASON'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'OC2' not in dfs:
return {}
oc2 = add_CLA_column(dfs, 'OC2')
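# a reason for no SDQ score should only be given for children continuously looked after for 12 months at 31 March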
error_mask = oc2['SDQ_REASON'].notna() & ~oc2['CONTINUOUSLY_LOOKED_AFTER']
error_locs = oc2.index[error_mask].to_list()
return {'OC2': error_locs}
return error, _validate
def validate_185():
error = ErrorDefinition(
code='185',
description="Child has not been looked after continuously for at least 12 months at " +
"31 March but a Strengths and Difficulties (SDQ) score has been completed.",
affected_fields=['SDQ_SCORE'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'OC2' not in dfs:
return {}
oc2 = add_CLA_column(dfs, 'OC2')
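# an SDQ score should only be recorded for children continuously looked after for 12 months at 31 March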
error_mask = oc2['SDQ_SCORE'].notna() & ~oc2['CONTINUOUSLY_LOOKED_AFTER']
error_locs = oc2.index[error_mask].to_list()
return {'OC2': error_locs}
return error, _validate
def validate_186():
error = ErrorDefinition(
code='186',
description="Children aged 4 or over at the start of the year and children aged under 17 at the " +
"end of the year and who have been looked after for at least 12 months continuously " +
"should have a Strengths and Difficulties (SDQ) score completed.",
affected_fields=['SDQ_SCORE'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'OC2' not in dfs:
return {}
oc2 = dfs['OC2']
collection_start_str = dfs['metadata']['collection_start']
collection_end_str = dfs['metadata']['collection_end']
collection_start = pd.to_datetime(collection_start_str, format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end_str, format='%d/%m/%Y', errors='coerce')
oc2['DOB_dt'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
oc2 = add_CLA_column(dfs, 'OC2')
oc2['4th_bday'] = oc2['DOB_dt'] + pd.DateOffset(years=4)
oc2['17th_bday'] = oc2['DOB_dt'] + pd.DateOffset(years=17)
error_mask = (
(oc2['4th_bday'] <= collection_start)
& (oc2['17th_bday'] > collection_end)
& oc2['CONTINUOUSLY_LOOKED_AFTER']
& oc2['SDQ_SCORE'].isna()
)
oc2_errors = oc2.loc[error_mask].index.to_list()
return {'OC2': oc2_errors}
return error, _validate
def validate_187():
error = ErrorDefinition(
code='187',
description="Child cannot be looked after continuously for 12 months at " +
"31 March (OC2) and have any of adoption or care leavers returns completed.",
affected_fields=['DATE_INT', 'DATE_MATCH', 'FOSTER_CARE', 'NB_ADOPTR', 'SEX_ADOPTR', 'LS_ADOPTR', # OC3
'IN_TOUCH', 'ACTIV', 'ACCOM'], # AD1
)
def _validate(dfs):
if (
'OC3' not in dfs
or 'AD1' not in dfs
or 'Episodes' not in dfs
):
return {}
# add 'CONTINUOUSLY_LOOKED_AFTER' column
ad1, oc3 = add_CLA_column(dfs, ['AD1', 'OC3'])
# OC3
should_be_blank = ['IN_TOUCH', 'ACTIV', 'ACCOM']
oc3_mask = oc3['CONTINUOUSLY_LOOKED_AFTER'] & oc3[should_be_blank].notna().any(axis=1)
oc3_error_locs = oc3[oc3_mask].index.to_list()
# AD1
should_be_blank = ['DATE_INT', 'DATE_MATCH', 'FOSTER_CARE', 'NB_ADOPTR', 'SEX_ADOPTR', 'LS_ADOPTR']
ad1_mask = ad1['CONTINUOUSLY_LOOKED_AFTER'] & ad1[should_be_blank].notna().any(axis=1)
ad1_error_locs = ad1[ad1_mask].index.to_list()
return {'AD1': ad1_error_locs,
'OC3': oc3_error_locs}
return error, _validate
def validate_188():
error = ErrorDefinition(
code='188',
description="Child is aged under 4 years at the end of the year, "
"but a Strengths and Difficulties (SDQ) score or a reason "
"for no SDQ score has been completed. ",
affected_fields=['SDQ_SCORE', 'SDQ_REASON'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
oc2 = dfs['OC2']
collection_end_str = dfs['metadata']['collection_end']
collection_end = pd.to_datetime(collection_end_str, format='%d/%m/%Y', errors='coerce')
oc2['DOB_dt'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
oc2['4th_bday'] = oc2['DOB_dt'] + pd.DateOffset(years=4)
error_mask = (
(oc2['4th_bday'] > collection_end)
& oc2[['SDQ_SCORE', 'SDQ_REASON']].notna().any(axis=1)
)
oc2_errors = oc2.loc[error_mask].index.to_list()
return {'OC2': oc2_errors}
return error, _validate
def validate_190():
error = ErrorDefinition(
code='190',
description="Child has not been looked after continuously for at least 12 months at 31 March but one or more "
"data items relating to children looked after for 12 months have been completed.",
affected_fields=['CONVICTED', 'HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT',
'SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED']
, # AD1
)
def _validate(dfs):
if (
'OC2' not in dfs
or 'Episodes' not in dfs
):
return {}
# add 'CONTINUOUSLY_LOOKED_AFTER' column
oc2 = add_CLA_column(dfs, 'OC2')
should_be_blank = ['CONVICTED', 'HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT',
'SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED']
mask = ~oc2['CONTINUOUSLY_LOOKED_AFTER'] & oc2[should_be_blank].notna().any(axis=1)
error_locs = oc2[mask].index.to_list()
return {'OC2': error_locs}
return error, _validate
def validate_191():
error = ErrorDefinition(
code='191',
description="Child has been looked after continuously for at least 12 months at 31 March but one or more "
"data items relating to children looked after for 12 months have been left blank.",
affected_fields=['IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE'], # OC2
)
def _validate(dfs):
if (
'OC2' not in dfs
or 'Episodes' not in dfs
):
return {}
# add 'CONTINUOUSLY_LOOKED_AFTER' column
oc2 = add_CLA_column(dfs, 'OC2')
should_be_present = ['IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE']
mask = oc2['CONTINUOUSLY_LOOKED_AFTER'] & oc2[should_be_present].isna().any(axis=1)
error_locs = oc2[mask].index.to_list()
return {'OC2': error_locs}
return error, _validate
def validate_607():
error = ErrorDefinition(
code='607',
description='Child ceased to be looked after in the year, but mother field has not been completed.',
affected_fields=['DEC', 'REC', 'MOTHER', 'LS', 'SEX']
)
def _validate(dfs):
if 'Header' not in dfs or 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
collection_start = dfs['metadata']['collection_start']
collection_end = dfs['metadata']['collection_end']
code_list = ['V3', 'V4']
# convert to datetime format
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
# prepare to merge
episodes.reset_index(inplace=True)
header.reset_index(inplace=True)
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er'])
# CEASED_TO_BE_LOOKED_AFTER = DEC is not null and REC is filled but not equal to X1
CEASED_TO_BE_LOOKED_AFTER = merged['DEC'].notna() & ((merged['REC'] != 'X1') & merged['REC'].notna())
# and <LS> not = ‘V3’ or ‘V4’
check_LS = ~(merged['LS'].isin(code_list))
# and <DEC> is in <CURRENT_COLLECTION_YEAR>
check_DEC = (collection_start <= merged['DEC']) & (merged['DEC'] <= collection_end)
# Where <CEASED_TO_BE_LOOKED_AFTER> = ‘Y’, and <LS> not = ‘V3’ or ‘V4’ and <DEC> is in <CURRENT_COLLECTION_YEAR> and <SEX> = ‘2’ then <MOTHER> should be provided.
mask = CEASED_TO_BE_LOOKED_AFTER & check_LS & check_DEC & (merged['SEX'] == '2') & (merged['MOTHER'].isna())
header_error_locs = merged.loc[mask, 'index_er']
eps_error_locs = merged.loc[mask, 'index_eps']
return {'Episodes': eps_error_locs.tolist(), 'Header': header_error_locs.unique().tolist()}
return error, _validate
def validate_210():
error = ErrorDefinition(
code='210',
description='Children looked after for more than a week at 31 March should not have an unknown Unique Pupil Number (UPN) code of UN4.',
affected_fields=['UPN', 'DECOM']
)
def _validate(dfs):
if 'Header' not in dfs or 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
collection_end = dfs['metadata']['collection_end']
# convert to datetime
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
yr = collection_end.year
reference_date = pd.to_datetime('24/03/' + str(yr), format='%d/%m/%Y', errors='coerce')
# prepare to merge
episodes.reset_index(inplace=True)
header.reset_index(inplace=True)
# merge on CHILD: a many-to-one join from episodes to header, rather than a one-to-many merge on UPN
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er'])
# If <UPN> = 'UN4' then no episode <DECOM> must be >= 24/03/YYYY. Note: YYYY refers to the current collection year.
mask = (merged['UPN'] == 'UN4') & (merged['DECOM'] >= reference_date)
# error locations
error_locs_header = merged.loc[mask, 'index_er']
error_locs_eps = merged.loc[mask, 'index_eps']
return {'Episodes': error_locs_eps.tolist(), 'Header': error_locs_header.unique().tolist()}
return error, _validate
def validate_1010():
error = ErrorDefinition(
code='1010',
description='This child has no episodes loaded for current year even though there was an open episode of '
+ 'care at the end of the previous year, and care leaver data has been entered.',
affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Episodes_last' not in dfs or 'OC3' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes_last = dfs['Episodes_last']
oc3 = dfs['OC3']
# convert DECOM to datetime, drop missing/invalid values, then sort by CHILD and DECOM
episodes_last['DECOM'] = pd.to_datetime(episodes_last['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes_last = episodes_last.dropna(subset=['DECOM']).sort_values(['CHILD', 'DECOM'], ascending=True)
# Keep only the final episode for each child (ie where the following row has a different CHILD value)
episodes_last = episodes_last[
episodes_last['CHILD'].shift(-1) != episodes_last['CHILD']
]
# Keep only the final episodes that were still open
episodes_last = episodes_last[episodes_last['DEC'].isna()]
# The remaining children ought to have episode data in the current year if they are in OC3
has_current_episodes = oc3['CHILD'].isin(episodes['CHILD'])
has_open_episode_last = oc3['CHILD'].isin(episodes_last['CHILD'])
error_mask = ~has_current_episodes & has_open_episode_last
validation_error_locations = oc3.index[error_mask]
return {'OC3': validation_error_locations.tolist()}
return error, _validate
def validate_525():
error = ErrorDefinition(
code='525',
description='A child for whom the decision to be placed for adoption has been reversed cannot be adopted during the year.',
affected_fields=['DATE_PLACED_CEASED', 'DATE_INT', 'DATE_MATCH', 'FOSTER_CARE', 'NB_ADOPTR', 'SEX_ADOPTR',
'LS_ADOPTR']
)
def _validate(dfs):
if 'PlacedAdoption' not in dfs or 'AD1' not in dfs:
return {}
else:
placed_adoption = dfs['PlacedAdoption']
ad1 = dfs['AD1']
# prepare to merge
placed_adoption.reset_index(inplace=True)
ad1.reset_index(inplace=True)
merged = placed_adoption.merge(ad1, on='CHILD', how='left', suffixes=['_placed', '_ad1'])
# If <DATE_PLACED_CEASED> not Null, then <DATE_INT>; <DATE_MATCH>; <FOSTER_CARE>; <NB_ADOPTR>; <SEX_ADOPTR>; and <LS_ADOPTR> should not be provided
mask = merged['DATE_PLACED_CEASED'].notna() & (
merged['DATE_INT'].notna() | merged['DATE_MATCH'].notna() | merged['FOSTER_CARE'].notna() |
merged['NB_ADOPTR'].notna() | merged['SEX_ADOPTR'].notna() | merged['LS_ADOPTR'].notna())
# error locations
pa_error_locs = merged.loc[mask, 'index_placed']
ad_error_locs = merged.loc[mask, 'index_ad1']
# return result
return {'PlacedAdoption': pa_error_locs.tolist(), 'AD1': ad_error_locs.tolist()}
return error, _validate
def validate_335():
error = ErrorDefinition(
code='335',
description='The current foster value (0) suggests that child is not adopted by current foster carer, but last placement is A2, A3, or A5. Or the current foster value (1) suggests that child is adopted by current foster carer, but last placement is A1, A4 or A6.',
affected_fields=['PLACE', 'FOSTER_CARE']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'AD1' not in dfs:
return {}
else:
episodes = dfs['Episodes']
ad1 = dfs['AD1']
# prepare to merge
episodes.reset_index(inplace=True)
ad1.reset_index(inplace=True)
merged = episodes.merge(ad1, on='CHILD', how='left', suffixes=['_eps', '_ad1'])
# Where <PL> = 'A2', 'A3' or 'A5' and <REC> = 'E1', 'E11' or 'E12' then <FOSTER_CARE> should not be '0'; where <PL> = 'A1', 'A4' or 'A6' and <REC> = 'E1', 'E11' or 'E12' then <FOSTER_CARE> should not be '1'.
mask = (
merged['REC'].isin(['E1', 'E11', 'E12']) & (
(merged['PLACE'].isin(['A2', 'A3', 'A5']) & (merged['FOSTER_CARE'].astype(str) == '0'))
| (merged['PLACE'].isin(['A1', 'A4', 'A6']) & (merged['FOSTER_CARE'].astype(str) == '1'))
)
)
eps_error_locs = merged.loc[mask, 'index_eps']
ad1_error_locs = merged.loc[mask, 'index_ad1']
# use .unique since join is many to one
return {'Episodes': eps_error_locs.tolist(), 'AD1': ad1_error_locs.unique().tolist()}
return error, _validate
def validate_215():
error = ErrorDefinition(
code='215',
description='Child has care leaver information but one or more data items relating to children looked after for 12 months have been completed.',
affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM', 'CONVICTED', 'HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK',
'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED']
)
def _validate(dfs):
if 'OC3' not in dfs or 'OC2' not in dfs:
return {}
else:
oc3 = dfs['OC3']
oc2 = dfs['OC2']
# prepare to merge
oc3.reset_index(inplace=True)
oc2.reset_index(inplace=True)
merged = oc3.merge(oc2, on='CHILD', how='left', suffixes=['_3', '_2'])
# If any of <IN_TOUCH>, <ACTIV> or <ACCOM> have been provided then <CONVICTED>; <HEALTH_CHECK>; <IMMUNISATIONS>; <TEETH_CHECK>; <HEALTH_ASSESSMENT>; <SUBSTANCE MISUSE>; <INTERVENTION_RECEIVED>; <INTERVENTION_OFFERED>; should not be provided
mask = (merged['IN_TOUCH'].notna() | merged['ACTIV'].notna() | merged['ACCOM'].notna()) & (
merged['CONVICTED'].notna() | merged['HEALTH_CHECK'].notna() | merged['IMMUNISATIONS'].notna() |
merged['TEETH_CHECK'].notna() | merged['HEALTH_ASSESSMENT'].notna() | merged[
'SUBSTANCE_MISUSE'].notna() | merged['INTERVENTION_RECEIVED'].notna() | merged[
'INTERVENTION_OFFERED'].notna())
# error locations
oc3_error_locs = merged.loc[mask, 'index_3']
oc2_error_locs = merged.loc[mask, 'index_2']
return {'OC3': oc3_error_locs.tolist(), 'OC2': oc2_error_locs.tolist()}
return error, _validate
def validate_399():
error = ErrorDefinition(
code='399',
description='Mother field, review field or participation field are completed but '
+ 'child is looked after under legal status V3 or V4.',
affected_fields=['MOTHER', 'LS', 'REVIEW', 'REVIEW_CODE']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Header' not in dfs or 'Reviews' not in dfs:
return {}
else:
episodes = dfs['Episodes']
header = dfs['Header']
reviews = dfs['Reviews']
code_list = ['V3', 'V4']
# prepare to merge
episodes['index_eps'] = episodes.index
header['index_hdr'] = header.index
reviews['index_revs'] = reviews.index
# merge
merged = (episodes.merge(header, on='CHILD', how='left')
.merge(reviews, on='CHILD', how='left'))
# If <LS> = 'V3' or 'V4' then <MOTHER>, <REVIEW> and <REVIEW_CODE> should not be provided
mask = merged['LS'].isin(code_list) & (
merged['MOTHER'].notna() | merged['REVIEW'].notna() | merged['REVIEW_CODE'].notna())
# Error locations
eps_errors = merged.loc[mask, 'index_eps']
header_errors = merged.loc[mask, 'index_hdr'].unique()
revs_errors = merged.loc[mask, 'index_revs'].unique()
return {'Episodes': eps_errors.tolist(),
'Header': header_errors.tolist(),
'Reviews': revs_errors.tolist()}
return error, _validate
def validate_189():
error = ErrorDefinition(
code='189',
description='Child is aged 17 years or over at the beginning of the year, but a Strengths and Difficulties '
+ '(SDQ) score or a reason for no Strengths and Difficulties (SDQ) score has been completed.',
affected_fields=['DOB', 'SDQ_SCORE', 'SDQ_REASON']
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
collection_start = dfs['metadata']['collection_start']
# datetime format allows appropriate comparison between dates
oc2['DOB'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
# If <DOB> >17 years prior to <COLLECTION_START_DATE> then <SDQ_SCORE> and <SDQ_REASON> should not be provided
mask = ((oc2['DOB'] + pd.offsets.DateOffset(years=17)) <= collection_start) & (
oc2['SDQ_REASON'].notna() | oc2['SDQ_SCORE'].notna())
# That is, raise error if collection_start >= DOB + 17 years
oc_error_locs = oc2.index[mask]
return {'OC2': oc_error_locs.tolist()}
return error, _validate
def validate_226():
error = ErrorDefinition(
code='226',
description='Reason for placement change is not required.',
affected_fields=['REASON_PLACE_CHANGE', 'PLACE']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
code_list = ['T0', 'T1', 'T2', 'T3', 'T4']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
# create column to see previous REASON_PLACE_CHANGE
episodes = episodes.sort_values(['CHILD', 'DECOM'])
episodes['PREVIOUS_REASON'] = episodes.groupby('CHILD')['REASON_PLACE_CHANGE'].shift(1)
# If <PL> = 'T0'; 'T1'; 'T2'; 'T3' or 'T4' then <REASON_PLACE_CHANGE> should be null in current episode and current episode - 1
mask = episodes['PLACE'].isin(code_list) & (
episodes['REASON_PLACE_CHANGE'].notna() | episodes['PREVIOUS_REASON'].notna())
# error locations
error_locs = episodes.index[mask]
return {'Episodes': error_locs.tolist()}
return error, _validate
def validate_358():
error = ErrorDefinition(
code='358',
description='Child with this legal status should not be under 10.',
affected_fields=['DECOM', 'DOB', 'LS']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Header' not in dfs:
return {}
else:
episodes = dfs['Episodes']
header = dfs['Header']
code_list = ['J1', 'J2', 'J3']
# convert dates to datetime format
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
# prepare to merge
episodes.reset_index(inplace=True)
header.reset_index(inplace=True)
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er'])
# Where <LS> = 'J1', 'J2' or 'J3' then <DOB> should be at least 10 years prior to <DECOM>
mask = merged['LS'].isin(code_list) & (merged['DOB'] + pd.offsets.DateOffset(years=10) < merged['DECOM'])
# That is, raise error if DECOM > DOB + 10years
# error locations
header_error_locs = merged.loc[mask, 'index_er']
episode_error_locs = merged.loc[mask, 'index_eps']
# one to many join implies use .unique on the 'one'
return {'Episodes': episode_error_locs.tolist(), 'Header': header_error_locs.unique().tolist()}
return error, _validate
def validate_407():
error = ErrorDefinition(
code='407',
description='Reason episode ceased is Special Guardianship Order, but child has reached age 18.',
affected_fields=['DEC', 'DOB', 'REC']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Header' not in dfs:
return {}
else:
episodes = dfs['Episodes']
header = dfs['Header']
code_list = ['E45', 'E46', 'E47', 'E48']
# convert dates to datetime format
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
# prepare to merge
episodes.reset_index(inplace=True)
header.reset_index(inplace=True)
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er'])
# If <REC> = ‘E45’ or ‘E46’ or ‘E47’ or ‘E48’ then <DOB> must be < 18 years prior to <DEC>
mask = merged['REC'].isin(code_list) & (merged['DOB'] + pd.offsets.DateOffset(years=18) < merged['DEC'])
# That is, raise error if DEC > DOB + 18 years
# error locations
header_error_locs = merged.loc[mask, 'index_er']
episode_error_locs = merged.loc[mask, 'index_eps']
# one to many join implies use .unique on the 'one'
return {'Episodes': episode_error_locs.tolist(), 'Header': header_error_locs.unique().tolist()}
return error, _validate
def validate_1007():
error = ErrorDefinition(
code='1007',
description='Care leaver information is not required for 17- or 18-year olds who are still looked after.',
affected_fields=['DEC', 'REC', 'DOB', 'IN_TOUCH', 'ACTIV', 'ACCOM']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'OC3' not in dfs:
return {}
else:
episodes = dfs['Episodes']
oc3 = dfs['OC3']
collection_end = dfs['metadata']['collection_end']
# convert dates to datetime format
oc3['DOB'] = pd.to_datetime(oc3['DOB'], format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
# prepare to merge
episodes.reset_index(inplace=True)
oc3.reset_index(inplace=True)
merged = episodes.merge(oc3, on='CHILD', how='left', suffixes=['_eps', '_oc3'])
# If <DOB> < 19 and >= to 17 years prior to <COLLECTION_END_DATE> and current episode <DEC> and or <REC> not provided then <IN_TOUCH>, <ACTIV> and <ACCOM> should not be provided
check_age = (merged['DOB'] + pd.offsets.DateOffset(years=17) <= collection_end) & (
merged['DOB'] + pd.offsets.DateOffset(years=19) > collection_end)
# That is, check that 17<=age<19
check_dec_rec = merged['REC'].isna() | merged['DEC'].isna()
# if either DEC or REC are absent
mask = check_age & check_dec_rec & (
merged['IN_TOUCH'].notna() | merged['ACTIV'].notna() | merged['ACCOM'].notna())
# Then raise an error if either IN_TOUCH, ACTIV, or ACCOM have been provided too
# error locations
oc3_error_locs = merged.loc[mask, 'index_oc3']
episode_error_locs = merged.loc[mask, 'index_eps']
# one to many join implies use .unique on the 'one'
return {'Episodes': episode_error_locs.tolist(), 'OC3': oc3_error_locs.unique().tolist()}
return error, _validate
def validate_442():
error = ErrorDefinition(
code='442',
description='Unique Pupil Number (UPN) field is not completed.',
affected_fields=['UPN', 'LS']
)
def _validate(dfs):
if ('Episodes' not in dfs) or ('Header' not in dfs):
return {}
else:
episodes = dfs['Episodes']
header = dfs['Header']
episodes.reset_index(inplace=True)
header.reset_index(inplace=True)
code_list = ['V3', 'V4']
# merge left on episodes to get all children for which episodes have been recorded even if they do not exist on the header.
merged = episodes.merge(header, on=['CHILD'], how='left', suffixes=['_eps', '_er'])
# Where any episode present, with an <LS> not = 'V3' or 'V4' then <UPN> must be provided
mask = (~merged['LS'].isin(code_list)) & merged['UPN'].isna()
episode_error_locs = merged.loc[mask, 'index_eps']
header_error_locs = merged.loc[mask, 'index_er']
return {'Episodes': episode_error_locs.tolist(),
# Select unique values since many episodes are joined to one header
# and multiple errors will be raised for the same index.
'Header': header_error_locs.dropna().unique().tolist()}
return error, _validate
def validate_344():
error = ErrorDefinition(
code='344',
description='The record shows the young person has died or returned home to live with parent(s) or someone with parental responsibility for a continuous period of 6 months or more, but activity and/or accommodation on leaving care have been completed.',
affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM']
)
def _validate(dfs):
if 'OC3' not in dfs:
return {}
else:
oc3 = dfs['OC3']
# If <IN_TOUCH> = 'DIED' or 'RHOM' then <ACTIV> and <ACCOM> should not be provided
mask = ((oc3['IN_TOUCH'] == 'DIED') | (oc3['IN_TOUCH'] == 'RHOM')) & (
oc3['ACTIV'].notna() | oc3['ACCOM'].notna())
error_locations = oc3.index[mask]
return {'OC3': error_locations.to_list()}
return error, _validate
def validate_345():
error = ErrorDefinition(
code='345',
description='The data collection record shows the local authority is in touch with this young person, but activity and/or accommodation data items are zero.',
affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM']
)
def _validate(dfs):
if 'OC3' not in dfs:
return {}
else:
oc3 = dfs['OC3']
# If <IN_TOUCH> = 'Yes' then <ACTIV> and <ACCOM> must be provided
mask = (oc3['IN_TOUCH'] == 'YES') & (oc3['ACTIV'].isna() | oc3['ACCOM'].isna())
error_locations = oc3.index[mask]
return {'OC3': error_locations.to_list()}
return error, _validate
def validate_384():
error = ErrorDefinition(
code='384',
description='A child receiving respite care cannot be in a long-term foster placement ',
affected_fields=['PLACE', 'LS']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
# Where <LS> = 'V3' or 'V4' then <PL> must not be 'U1' or 'U4'
mask = ((episodes['LS'] == 'V3') | (episodes['LS'] == 'V4')) & (
(episodes['PLACE'] == 'U1') | (episodes['PLACE'] == 'U4'))
error_locations = episodes.index[mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_390():
error = ErrorDefinition(
code='390',
description='Reason episode ceased is adopted but child has not been previously placed for adoption.',
affected_fields=['PLACE', 'REC']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
# If <REC> = 'E11' or 'E12' then <PL> must be one of 'A3', 'A4', 'A5' or 'A6'
mask = ((episodes['REC'] == 'E11') | (episodes['REC'] == 'E12')) & ~(
(episodes['PLACE'] == 'A3') | (episodes['PLACE'] == 'A4') | (episodes['PLACE'] == 'A5') | (
episodes['PLACE'] == 'A6'))
error_locations = episodes.index[mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_378():
error = ErrorDefinition(
code='378',
description='A child who is placed with parent(s) cannot be looked after under a single period of accommodation under Section 20 of the Children Act 1989.',
affected_fields=['PLACE', 'LS']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
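# placement with parents (P1) cannot be combined with legal status V2 (single period of accommodation under Section 20)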
# & binds more tightly than ==, so the brackets are necessary here
mask = (episodes['PLACE'] == 'P1') & (episodes['LS'] == 'V2')
error_locations = episodes.index[mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_398():
error = ErrorDefinition(
code='398',
description='Distance field completed but child looked after under legal status V3 or V4.',
affected_fields=['LS', 'HOME_POST', 'PL_POST']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
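# for respite legal statuses V3/V4, home and placement postcodes (used to derive distance) should not be completed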
mask = ((episodes['LS'] == 'V3') | (episodes['LS'] == 'V4')) & (
episodes['HOME_POST'].notna() | episodes['PL_POST'].notna())
error_locations = episodes.index[mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_451():
error = ErrorDefinition(
code='451',
description='Child is still freed for adoption, but freeing orders could not be applied for since 30 December 2005.',
affected_fields=['DEC', 'REC', 'LS']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
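# an episode that is still open (no DEC and no REC) should not have legal status D1 (freed for adoption)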
mask = episodes['DEC'].isna() & episodes['REC'].isna() & (episodes['LS'] == 'D1')
error_locations = episodes.index[mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_519():
error = ErrorDefinition(
code='519',
description='Data entered on the legal status of adopters shows civil partnership couple, but data entered on genders of adopters does not show it as a couple.',
affected_fields=['LS_ADOPTR', 'SEX_ADOPTR']
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
ad1 = dfs['AD1']
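# LS_ADOPTR 'L2' (civil partnership couple) must be paired with a couple gender code: MM, FF or MF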
mask = (ad1['LS_ADOPTR'] == 'L2') & (
(ad1['SEX_ADOPTR'] != 'MM') & (ad1['SEX_ADOPTR'] != 'FF') & (ad1['SEX_ADOPTR'] != 'MF'))
error_locations = ad1.index[mask]
return {'AD1': error_locations.to_list()}
return error, _validate
def validate_520():
error = ErrorDefinition(
code='520',
description='Data entry on the legal status of adopters shows different gender married couple but data entry on genders of adopters shows it as a same gender couple.',
affected_fields=['LS_ADOPTR', 'SEX_ADOPTR']
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
ad1 = dfs['AD1']
# check condition
mask = (ad1['LS_ADOPTR'] == 'L11') & (ad1['SEX_ADOPTR'] != 'MF')
error_locations = ad1.index[mask]
return {'AD1': error_locations.to_list()}
return error, _validate
def validate_522():
error = ErrorDefinition(
code='522',
description='Date of decision that the child should be placed for adoption must be on or before the date that a child should no longer be placed for adoption.',
affected_fields=['DATE_PLACED', 'DATE_PLACED_CEASED']
)
def _validate(dfs):
if 'PlacedAdoption' not in dfs:
return {}
else:
placed_adoption = dfs['PlacedAdoption']
# Convert to datetimes
placed_adoption['DATE_PLACED_CEASED'] = pd.to_datetime(placed_adoption['DATE_PLACED_CEASED'],
format='%d/%m/%Y', errors='coerce')
placed_adoption['DATE_PLACED'] = pd.to_datetime(placed_adoption['DATE_PLACED'], format='%d/%m/%Y',
errors='coerce')
# Boolean mask
mask = placed_adoption['DATE_PLACED_CEASED'] > placed_adoption['DATE_PLACED']
error_locations = placed_adoption.index[mask]
return {'PlacedAdoption': error_locations.to_list()}
return error, _validate
def validate_563():
error = ErrorDefinition(
code='563',
description='The child should no longer be placed for adoption but the date of the decision that the child should be placed for adoption is blank',
affected_fields=['DATE_PLACED', 'REASON_PLACED_CEASED', 'DATE_PLACED_CEASED'],
)
def _validate(dfs):
if 'PlacedAdoption' not in dfs:
return {}
else:
placed_adoption = dfs['PlacedAdoption']
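# if a reason and date for the placement decision ceasing are present, DATE_PLACED must also be completed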
mask = placed_adoption['REASON_PLACED_CEASED'].notna() & placed_adoption['DATE_PLACED_CEASED'].notna() & \
placed_adoption['DATE_PLACED'].isna()
error_locations = placed_adoption.index[mask]
return {'PlacedAdoption': error_locations.to_list()}
return error, _validate
def validate_544():
error = ErrorDefinition(
code='544',
description="Any child who has conviction information completed must also have immunisation, teeth check, health assessment and substance misuse problem identified fields completed.",
affected_fields=['CONVICTED', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
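# where CONVICTED = '1', the immunisations, teeth check, health assessment and substance misuse items must all be completed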
convict = oc2['CONVICTED'].astype(str) == '1'
immunisations = oc2['IMMUNISATIONS'].isna()
teeth_ck = oc2['TEETH_CHECK'].isna()
health_ass = oc2['HEALTH_ASSESSMENT'].isna()
sub_misuse = oc2['SUBSTANCE_MISUSE'].isna()
error_mask = convict & (immunisations | teeth_ck | health_ass | sub_misuse)
validation_error_locations = oc2.index[error_mask]
return {'OC2': validation_error_locations.to_list()}
return error, _validate
def validate_634():
error = ErrorDefinition(
code='634',
description='There are entries for previous permanence options, but child has not started to be looked after from 1 April 2016 onwards.',
affected_fields=['LA_PERM', 'PREV_PERM', 'DATE_PERM', 'DECOM']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'PrevPerm' not in dfs:
return {}
else:
episodes = dfs['Episodes']
prevperm = dfs['PrevPerm']
collection_start = dfs['metadata']['collection_start']
# convert date field to appropriate format
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
# take each child's latest DECOM, since it is the most likely to fall on or after the cut-off
episodes['LAST_DECOM'] = episodes.groupby('CHILD')['DECOM'].transform('max')
# prepare to merge
episodes.reset_index(inplace=True)
prevperm.reset_index(inplace=True)
merged = prevperm.merge(episodes, on='CHILD', how='left', suffixes=['_prev', '_eps'])
# If <PREV_PERM> or <LA_PERM> or <DATE_PERM> provided, then at least 1 episode must have a <DECOM> later than 01/04/2016
mask = (merged['PREV_PERM'].notna() | merged['DATE_PERM'].notna() | merged['LA_PERM'].notna()) & (
merged['LAST_DECOM'] < collection_start)
eps_error_locs = merged.loc[mask, 'index_eps']
prevperm_error_locs = merged.loc[mask, 'index_prev']
return {'Episodes': eps_error_locs.unique().tolist(), 'PrevPerm': prevperm_error_locs.unique().tolist()}
return error, _validate
def validate_158():
error = ErrorDefinition(
code='158',
description='If a child has been recorded as receiving an intervention for their substance misuse problem, then the additional item on whether an intervention was offered should be left blank.',
affected_fields=['INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
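# if an intervention was received ('1'), the 'intervention offered' item should be left blank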
error_mask = oc2['INTERVENTION_RECEIVED'].astype(str).eq('1') & oc2['INTERVENTION_OFFERED'].notna()
error_locations = oc2.index[error_mask]
return {'OC2': error_locations.tolist()}
return error, _validate
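# A minimal usage sketch, not part of the rule set (assumption: a rule is exercised by
# calling its returned _validate with a dict of DataFrames keyed by table name, as the
# guards above imply; the data here is hypothetical):
# error, _validate = validate_158()
# dfs = {'OC2': pd.DataFrame({'INTERVENTION_RECEIVED': ['1', '0'],
# 'INTERVENTION_OFFERED': ['1', None]})}
# _validate(dfs) # -> {'OC2': [0]}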
def validate_133():
error = ErrorDefinition(
code='133',
description="Data entry for accommodation after leaving care is invalid. If reporting on a child's accommodation after leaving care, the data entry must be valid.",
affected_fields=['ACCOM'],
)
def _validate(dfs):
if 'OC3' not in dfs:
return {}
else:
oc3 = dfs['OC3']
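# ACCOM, where provided, must be one of the valid accommodation codes listed below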
valid_codes = ['B1', 'B2', 'C1', 'C2', 'D1', 'D2', 'E1', 'E2', 'G1', 'G2', 'H1', 'H2', 'K1', 'K2', 'R1',
'R2', 'S2', 'T1', 'T2', 'U1', 'U2', 'V1', 'V2', 'W1', 'W2', 'X2', 'Y1', 'Y2', 'Z1', 'Z2',
'0']
error_mask = ~oc3['ACCOM'].isna() & ~oc3['ACCOM'].isin(valid_codes)
error_locations = oc3.index[error_mask]
return {'OC3': error_locations.tolist()}
return error, _validate
def validate_565():
error = ErrorDefinition(
code='565',
description='The date that the child started to be missing or away from placement without authorisation has been completed but whether the child was missing or away from placement without authorisation has not been completed.',
affected_fields=['MISSING', 'MIS_START']
)
def _validate(dfs):
if 'Missing' not in dfs:
return {}
else:
missing = dfs['Missing']
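# a missing/away-from-placement start date requires the MISSING indicator to be completed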
mask = missing['MIS_START'].notna() & missing['MISSING'].isna()
error_locations = missing.index[mask]
return {'Missing': error_locations.to_list()}
return error, _validate
def validate_433():
error = ErrorDefinition(
code='433',
description='The reason for new episode suggests that this is a continuation episode, but the episode does not start on the same day as the last episode finished.',
affected_fields=['RNE', 'DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes['original_index'] = episodes.index
episodes.sort_values(['CHILD', 'DECOM', 'DEC'], inplace=True)
episodes[['PREVIOUS_DEC', 'PREVIOUS_CHILD']] = episodes[['DEC', 'CHILD']].shift(1)
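# continuation episodes (RNE of P, L, T, U or B) must start on the day the previous episode for the same child ceased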
rne_is_ongoing = episodes['RNE'].str.upper().astype(str).isin(['P', 'L', 'T', 'U', 'B'])
date_mismatch = episodes['PREVIOUS_DEC'] != episodes['DECOM']
missing_date = episodes['PREVIOUS_DEC'].isna() | episodes['DECOM'].isna()
same_child = episodes['PREVIOUS_CHILD'] == episodes['CHILD']
error_mask = rne_is_ongoing & (date_mismatch | missing_date) & same_child
error_locations = episodes['original_index'].loc[error_mask].sort_values()
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_437():
error = ErrorDefinition(
code='437',
description='Reason episode ceased is child has died or is aged 18 or over but there are further episodes.',
affected_fields=['REC'],
)
# !# potential false negatives, as this only operates on the current year's data
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes.sort_values(['CHILD', 'DECOM'], inplace=True)
episodes[['NEXT_DECOM', 'NEXT_CHILD']] = episodes[['DECOM', 'CHILD']].shift(-1)
# drop rows with missing DECOM as invalid/missing values can lead to errors
episodes = episodes.dropna(subset=['DECOM'])
ceased_e2_e15 = episodes['REC'].str.upper().astype(str).isin(['E2', 'E15'])
has_later_episode = episodes['CHILD'] == episodes['NEXT_CHILD']
error_mask = ceased_e2_e15 & has_later_episode
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_547():
error = ErrorDefinition(
code='547',
description="Any child who has health promotion information completed must also have immunisation, teeth check, health assessment and substance misuse problem identified fields completed.",
affected_fields=['HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
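# where HEALTH_CHECK = '1', the immunisations, teeth check, health assessment and substance misuse items must all be completed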
healthck = oc2['HEALTH_CHECK'].astype(str) == '1'
immunisations = oc2['IMMUNISATIONS'].isna()
teeth_ck = oc2['TEETH_CHECK'].isna()
health_ass = oc2['HEALTH_ASSESSMENT'].isna()
sub_misuse = oc2['SUBSTANCE_MISUSE'].isna()
error_mask = healthck & (immunisations | teeth_ck | health_ass | sub_misuse)
validation_error_locations = oc2.index[error_mask]
return {'OC2': validation_error_locations.to_list()}
return error, _validate
def validate_635():
error = ErrorDefinition(
code='635',
description='There are entries for date of order and local authority code where previous permanence option was arranged but previous permanence code is Z1',
affected_fields=['LA_PERM', 'DATE_PERM', 'PREV_PERM']
)
def _validate(dfs):
if 'PrevPerm' not in dfs:
return {}
else:
prev_perm = dfs['PrevPerm']
# raise and error if either LA_PERM or DATE_PERM are present, yet PREV_PERM is absent.
mask = ((prev_perm['LA_PERM'].notna() | prev_perm['DATE_PERM'].notna()) & prev_perm['PREV_PERM'].isna())
error_locations = prev_perm.index[mask]
return {'PrevPerm': error_locations.to_list()}
return error, _validate
def validate_550():
error = ErrorDefinition(
code='550',
description='A placement provider code of PR0 can only be associated with placement P1.',
affected_fields=['PLACE', 'PLACE_PROVIDER'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
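# placement provider PR0 is only valid with placement type P1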
mask = (episodes['PLACE'] != 'P1') & episodes['PLACE_PROVIDER'].eq('PR0')
validation_error_locations = episodes.index[mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
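# A minimal usage sketch, not part of the rule set (hypothetical data; assumes the rule is
# run by passing a dict of DataFrames keyed by table name, as the guards above imply):
# error, _validate = validate_550()
# dfs = {'Episodes': pd.DataFrame({'PLACE': ['P1', 'U1'], 'PLACE_PROVIDER': ['PR0', 'PR0']})}
# _validate(dfs) # -> {'Episodes': [1]}, i.e. PR0 recorded against a non-P1 placement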
def validate_217():
error = ErrorDefinition(
code='217',
description='Children who are placed for adoption with current foster carers (placement types A3 or A5) must have a reason for new episode of S, T or U.',
affected_fields=['PLACE', 'DECOM', 'RNE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
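# A3/A5 placements beginning on or after 01/04/2015 must have a reason for new episode of S, T or U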
min_decom_allowed = pd.to_datetime('01/04/2015', format='%d/%m/%Y', errors='coerce')
reason_new_ep = ['S', 'T', 'U']
place_codes = ['A3', 'A5']
mask = (episodes['PLACE'].isin(place_codes) & (episodes['DECOM'] >= min_decom_allowed)) & ~episodes['RNE'].isin(reason_new_ep)
validation_error_mask = mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_518():
error = ErrorDefinition(
code='518',
description='If reporting legal status of adopters is L4 then the genders of adopters should be coded as MM or FF. MM = the adopting couple are both males. FF = the adopting couple are both females.',
affected_fields=['LS_ADOPTR', 'SEX_ADOPTR'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
AD1 = dfs['AD1']
error_mask = AD1['LS_ADOPTR'].eq('L4') & ~AD1['SEX_ADOPTR'].isin(['MM', 'FF'])
error_locations = AD1.index[error_mask]
return {'AD1': error_locations.tolist()}
return error, _validate
def validate_517():
error = ErrorDefinition(
code='517',
description='If reporting legal status of adopters is L3 then the genders of adopters should be coded as MF. MF = the adopting couple are male and female.',
affected_fields=['LS_ADOPTR', 'SEX_ADOPTR'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
AD1 = dfs['AD1']
error_mask = AD1['LS_ADOPTR'].eq('L3') & ~AD1['SEX_ADOPTR'].isin(['MF'])
error_locations = AD1.index[error_mask]
return {'AD1': error_locations.tolist()}
return error, _validate
def validate_558():
error = ErrorDefinition(
code='558',
description='If a child has been adopted, then the decision to place them for adoption has not been disrupted and the date of the decision that a child should no longer be placed for adoption should be left blank. If the REC code is either E11 or E12 then the DATE_PLACED_CEASED date should not be provided.',
affected_fields=['DATE_PLACED_CEASED', 'REC'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'PlacedAdoption' not in dfs:
return {}
else:
episodes = dfs['Episodes']
placedAdoptions = dfs['PlacedAdoption']
episodes = episodes.reset_index()
rec_codes = ['E11', 'E12']
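# for adopted children (REC = E11 or E12) the DATE_PLACED_CEASED field should not be completed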
placeEpisodes = episodes[episodes['REC'].isin(rec_codes)]
merged = placeEpisodes.merge(placedAdoptions, how='left', on='CHILD').set_index('index')
episodes_with_errors = merged[merged['DATE_PLACED_CEASED'].notna()]
error_mask = episodes.index.isin(episodes_with_errors.index)
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_453():
error = ErrorDefinition(
code='453',
description='Contradiction between placement distance in the last episode of the previous year and in the first episode of the current year.',
affected_fields=['PL_DISTANCE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
if 'Episodes_last' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes_last = dfs['Episodes_last']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes_last['DECOM'] = pd.to_datetime(episodes_last['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes['PL_DISTANCE'] = pd.to_numeric(episodes['PL_DISTANCE'], errors='coerce')
episodes_last['PL_DISTANCE'] = pd.to_numeric(episodes_last['PL_DISTANCE'], errors='coerce')
# drop rows with missing DECOM before finding idxmin/max, as invalid/missing values can lead to errors
episodes = episodes.dropna(subset=['DECOM'])
episodes_last = episodes_last.dropna(subset=['DECOM'])
episodes_min = episodes.groupby('CHILD')['DECOM'].idxmin()
episodes_last_max = episodes_last.groupby('CHILD')['DECOM'].idxmax()
episodes = episodes[episodes.index.isin(episodes_min)]
episodes_last = episodes_last[episodes_last.index.isin(episodes_last_max)]
episodes_merged = episodes.reset_index().merge(episodes_last, how='left', on=['CHILD'],
suffixes=('', '_last'), indicator=True).set_index('index')
in_both_years = episodes_merged['_merge'] == 'both'
same_rne = episodes_merged['RNE'] == episodes_merged['RNE_last']
last_year_open = episodes_merged['DEC_last'].isna()
different_pl_dist = abs(episodes_merged['PL_DISTANCE'] - episodes_merged['PL_DISTANCE_last']) >= 0.2
error_mask = in_both_years & same_rne & last_year_open & different_pl_dist
validation_error_locations = episodes.index[error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_516():
error = ErrorDefinition(
code='516',
description='The episode data submitted for this child does not show that he/she was with their former foster carer(s) during the year. If the code in the reason episode ceased is E45 or E46 the child must have a placement code of U1 to U6.',
affected_fields=['REC', 'PLACE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
place_codes = ['U1', 'U2', 'U3', 'U4', 'U5', 'U6']
rec_codes = ['E45', 'E46']
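# episodes ceasing with codes E45/E46 must have a placement with former foster carers (U1 to U6)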
error_mask = episodes['REC'].isin(rec_codes) & ~episodes['PLACE'].isin(place_codes)
validation_error_locations = episodes.index[error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_511():
error = ErrorDefinition(
code='511',
description='If reporting that the number of person(s) adopting the looked after child is two adopters then the code should only be MM, FF or MF. MM = the adopting couple are both males; FF = the adopting couple are both females; MF = The adopting couple are male and female.',
affected_fields=['NB_ADOPTR', 'SEX_ADOPTR'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
AD1 = dfs['AD1']
mask = AD1['NB_ADOPTR'].astype(str).eq('2') & AD1['SEX_ADOPTR'].isin(['M1', 'F1'])
validation_error_mask = mask
validation_error_locations = AD1.index[validation_error_mask]
return {'AD1': validation_error_locations.tolist()}
return error, _validate
def validate_524():
error = ErrorDefinition(
code='524',
description='If reporting legal status of adopters is L12 then the genders of adopters should be coded as MM or FF. MM = the adopting couple are both males. FF = the adopting couple are both females',
affected_fields=['LS_ADOPTR', 'SEX_ADOPTR'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
AD1 = dfs['AD1']
error_mask = AD1['LS_ADOPTR'].eq('L12') & ~AD1['SEX_ADOPTR'].isin(['MM', 'FF'])
error_locations = AD1.index[error_mask]
return {'AD1': error_locations.tolist()}
return error, _validate
def validate_441():
error = ErrorDefinition(
code='441',
description='Participation method indicates child was 4 years old or over at the time of the review, but the date of birth and review date indicates the child was under 4 years old.',
affected_fields=['DOB', 'REVIEW', 'REVIEW_CODE'],
)
def _validate(dfs):
if 'Reviews' not in dfs:
return {}
else:
reviews = dfs['Reviews']
reviews['DOB'] = pd.to_datetime(reviews['DOB'], format='%d/%m/%Y', errors='coerce')
reviews['REVIEW'] = pd.to_datetime(reviews['REVIEW'], format='%d/%m/%Y', errors='coerce')
reviews = reviews.dropna(subset=['REVIEW', 'DOB'])
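# participation codes PN1-PN7 imply the child was aged 4 or over at the review, so the review date must be at least 4 years after the DOB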
mask = reviews['REVIEW_CODE'].isin(['PN1', 'PN2', 'PN3', 'PN4', 'PN5', 'PN6', 'PN7']) & (
reviews['REVIEW'] < reviews['DOB'] + pd.offsets.DateOffset(years=4))
validation_error_mask = mask
validation_error_locations = reviews.index[validation_error_mask]
return {'Reviews': validation_error_locations.tolist()}
return error, _validate
def validate_184():
error = ErrorDefinition(
code='184',
description='Date of decision that a child should be placed for adoption is before the child was born.',
affected_fields=['DATE_PLACED', # PlacedAdoption
'DOB'], # Header
)
def _validate(dfs):
if 'Header' not in dfs or 'PlacedAdoption' not in dfs:
return {}
else:
child_record = dfs['Header']
placed_for_adoption = dfs['PlacedAdoption']
all_data = (placed_for_adoption
.reset_index()
.merge(child_record, how='left', on='CHILD', suffixes=[None, '_P4A']))
all_data['DATE_PLACED'] = pd.to_datetime(all_data['DATE_PLACED'], format='%d/%m/%Y', errors='coerce')
all_data['DOB'] = pd.to_datetime(all_data['DOB'], format='%d/%m/%Y', errors='coerce')
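# DATE_PLACED, where present, must be on or after the child's date of birth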
mask = (all_data['DATE_PLACED'] >= all_data['DOB']) | all_data['DATE_PLACED'].isna()
validation_error = ~mask
validation_error_locations = all_data[validation_error]['index'].unique()
return {'PlacedAdoption': validation_error_locations.tolist()}
return error, _validate
def validate_612():
error = ErrorDefinition(
code='612',
description="Date of birth field has been completed but mother field indicates child is not a mother.",
affected_fields=['SEX', 'MOTHER', 'MC_DOB'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
else:
header = dfs['Header']
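# MC_DOB should not be completed for a female child (SEX = '2') whose MOTHER field is '0' or blank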
error_mask = (
((header['MOTHER'].astype(str) == '0') | header['MOTHER'].isna())
& (header['SEX'].astype(str) == '2')
& header['MC_DOB'].notna()
)
validation_error_locations = header.index[error_mask]
return {'Header': validation_error_locations.tolist()}
return error, _validate
def validate_552():
"""
This error checks that the first adoption episode is after the last decision !
If there are multiple of either there may be unexpected results !
"""
error = ErrorDefinition(
code="552",
description="Date of Decision to place a child for adoption should be on or prior to the date that the child was placed for adoption.",
# Field that defines date of decision to place a child for adoption is DATE_PLACED and the start of adoption is defined by DECOM with 'A' placement types.
affected_fields=['DATE_PLACED', 'DECOM'],
)
def _validate(dfs):
if ('PlacedAdoption' not in dfs) or ('Episodes' not in dfs):
return {}
else:
# get the required datasets
placed_adoption = dfs['PlacedAdoption']
episodes = dfs['Episodes']
# keep index values so that they stay the same when needed later on for error locations
placed_adoption.reset_index(inplace=True)
episodes.reset_index(inplace=True)
adoption_eps = episodes[episodes['PLACE'].isin(['A3', 'A4', 'A5', 'A6'])].copy()
# find most recent adoption decision
placed_adoption['DATE_PLACED'] = pd.to_datetime(placed_adoption['DATE_PLACED'], format='%d/%m/%Y',
errors='coerce')
# remove rows where either of the required values have not been filled.
placed_adoption = placed_adoption[placed_adoption['DATE_PLACED'].notna()]
placed_adoption_inds = placed_adoption.groupby('CHILD')['DATE_PLACED'].idxmax(skipna=True)
last_decision = placed_adoption.loc[placed_adoption_inds]
# first time child started adoption
adoption_eps["DECOM"] = pd.to_datetime(adoption_eps['DECOM'], format='%d/%m/%Y', errors='coerce')
adoption_eps = adoption_eps[adoption_eps['DECOM'].notna()]
adoption_eps_inds = adoption_eps.groupby('CHILD')['DECOM'].idxmin(skipna=True)
# full information of first adoption
first_adoption = adoption_eps.loc[adoption_eps_inds]
# date of decision and date of start of adoption (DECOM) have to be put in one table
merged = first_adoption.merge(last_decision, on=['CHILD'], how='left', suffixes=['_EP', '_PA'])
# check to see if date of decision to place is less than or equal to date placed.
decided_after_placed = merged["DECOM"] < merged["DATE_PLACED"]
# find the corresponding location of error values per file.
episode_error_locs = merged.loc[decided_after_placed, 'index_EP']
placedadoption_error_locs = merged.loc[decided_after_placed, 'index_PA']
return {"PlacedAdoption": placedadoption_error_locs.to_list(), "Episodes": episode_error_locs.to_list()}
return error, _validate
def validate_551():
error = ErrorDefinition(
code='551',
description='Child has been placed for adoption but there is no date of the decision that the child should be placed for adoption.',
affected_fields=['DATE_PLACED', 'PLACE'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'PlacedAdoption' not in dfs:
return {}
else:
episodes = dfs['Episodes']
placedAdoptions = dfs['PlacedAdoption']
episodes = episodes.reset_index()
place_codes = ['A3', 'A4', 'A5', 'A6']
placeEpisodes = episodes[episodes['PLACE'].isin(place_codes)]
merged = placeEpisodes.merge(placedAdoptions, how='left', on='CHILD').set_index('index')
episodes_with_errors = merged[merged['DATE_PLACED'].isna()]
error_mask = episodes.index.isin(episodes_with_errors.index)
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_557():
error = ErrorDefinition(
code='557',
description="Child for whom the decision was made that they should be placed for adoption has left care " +
"but was not adopted and information on the decision that they should no longer be placed for " +
"adoption items has not been completed.",
affected_fields=['DATE_PLACED_CEASED', 'REASON_PLACED_CEASED', # PlacedAdoption
'PLACE', 'LS', 'REC'], # Episodes
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
if 'PlacedAdoption' not in dfs:
return {}
else:
eps = dfs['Episodes']
placed = dfs['PlacedAdoption']
eps = eps.reset_index()
placed = placed.reset_index()
child_placed = eps['PLACE'].isin(['A3', 'A4', 'A5', 'A6'])
order_granted = eps['LS'].isin(['D1', 'E1'])
not_adopted = ~eps['REC'].isin(['E11', 'E12']) & eps['REC'].notna()
placed['ceased_incomplete'] = (
placed['DATE_PLACED_CEASED'].isna() | placed['REASON_PLACED_CEASED'].isna()
)
eps = eps[(child_placed | order_granted) & not_adopted]
eps = eps.merge(placed, on='CHILD', how='left', suffixes=['_EP', '_PA'], indicator=True)
eps = eps[(eps['_merge'] == 'left_only') | eps['ceased_incomplete']]
EP_errors = eps['index_EP']
PA_errors = eps['index_PA'].dropna()
return {
'Episodes': EP_errors.to_list(),
'PlacedAdoption': PA_errors.to_list(),
}
return error, _validate
def validate_207():
error = ErrorDefinition(
code='207',
description='Mother status for the current year disagrees with the mother status already recorded for this child.',
affected_fields=['MOTHER'],
)
def _validate(dfs):
if 'Header' not in dfs or 'Header_last' not in dfs:
return {}
else:
header = dfs['Header']
header_last = dfs['Header_last']
header_merged = header.reset_index().merge(header_last, how='left', on=['CHILD'], suffixes=('', '_last'),
indicator=True).set_index('index')
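# a child recorded as a mother last year (MOTHER = '1') must not have a different mother status this year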
in_both_years = header_merged['_merge'] == 'both'
mother_is_different = header_merged['MOTHER'].astype(str) != header_merged['MOTHER_last'].astype(str)
mother_was_true = header_merged['MOTHER_last'].astype(str) == '1'
error_mask = in_both_years & mother_is_different & mother_was_true
error_locations = header.index[error_mask]
return {'Header': error_locations.to_list()}
return error, _validate
def validate_523():
error = ErrorDefinition(
code='523',
description="Date of decision that the child should be placed for adoption should be the same date as the decision that adoption is in the best interest (date should be placed).",
affected_fields=['DATE_PLACED', 'DATE_INT'],
)
def _validate(dfs):
if ("AD1" not in dfs) or ("PlacedAdoption" not in dfs):
return {}
else:
placed_adoption = dfs["PlacedAdoption"]
ad1 = dfs["AD1"]
# keep initial index values to be reused for locating errors later on.
placed_adoption.reset_index(inplace=True)
ad1.reset_index(inplace=True)
# convert to datetime to enable comparison
placed_adoption['DATE_PLACED'] = pd.to_datetime(placed_adoption['DATE_PLACED'], format="%d/%m/%Y",
errors='coerce')
ad1["DATE_INT"] = pd.to_datetime(ad1['DATE_INT'], format='%d/%m/%Y', errors='coerce')
# drop rows where either of the required values have not been filled.
placed_adoption = placed_adoption[placed_adoption["DATE_PLACED"].notna()]
ad1 = ad1[ad1["DATE_INT"].notna()]
# bring corresponding values together from both dataframes
merged_df = placed_adoption.merge(ad1, on=['CHILD'], how='inner', suffixes=["_AD", "_PA"])
# find error values
different_dates = merged_df['DATE_INT'] != merged_df['DATE_PLACED']
# map error locations to corresponding indices
pa_error_locations = merged_df.loc[different_dates, 'index_PA']
ad1_error_locations = merged_df.loc[different_dates, 'index_AD']
return {"PlacedAdoption": pa_error_locations.to_list(), "AD1": ad1_error_locations.to_list()}
return error, _validate
def validate_3001():
error = ErrorDefinition(
code='3001',
description='Where care leavers information is being returned for a young person around their 17th birthday, the accommodation cannot be with their former foster carer(s).',
affected_fields=['REC'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
if 'OC3' not in dfs:
return {}
else:
header = dfs['Header']
oc3 = dfs['OC3']
collection_start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce')
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
header['DOB17'] = header['DOB'] + pd.DateOffset(years=17)
oc3_merged = oc3.reset_index().merge(header, how='left', on=['CHILD'], suffixes=('', '_header'),
indicator=True).set_index('index')
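# accommodation codes Z1/Z2 (with former foster carers) are not allowed where the 17th birthday falls within the collection year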
accom_foster = oc3_merged['ACCOM'].str.upper().astype(str).isin(['Z1', 'Z2'])
age_17_in_year = (oc3_merged['DOB17'] <= collection_end) & (oc3_merged['DOB17'] >= collection_start)
error_mask = accom_foster & age_17_in_year
error_locations = oc3.index[error_mask]
return {'OC3': error_locations.to_list()}
return error, _validate
def validate_389():
error = ErrorDefinition(
code='389',
description='Reason episode ceased is that child transferred to care of adult social care services, but child is aged under 16.',
affected_fields=['REC'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
if 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
header['DOB16'] = header['DOB'] + pd.DateOffset(years=16)
episodes_merged = episodes.reset_index().merge(header, how='left', on=['CHILD'], suffixes=('', '_header'),
indicator=True).set_index('index')
ceased_asc = episodes_merged['REC'].str.upper().astype(str).isin(['E7'])
ceased_over_16 = episodes_merged['DOB16'] <= episodes_merged['DEC']
error_mask = ceased_asc & ~ceased_over_16
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_387():
error = ErrorDefinition(
code='387',
description='Reason episode ceased is child moved into independent living arrangement, but the child is aged under 14.',
affected_fields=['REC'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
if 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
header['DOB14'] = header['DOB'] + pd.DateOffset(years=14)
episodes_merged = episodes.reset_index().merge(header, how='left', on=['CHILD'], suffixes=('', '_header'),
indicator=True).set_index('index')
ceased_indep = episodes_merged['REC'].str.upper().astype(str).isin(['E5', 'E6'])
ceased_over_14 = episodes_merged['DOB14'] <= episodes_merged['DEC']
dec_present = episodes_merged['DEC'].notna()
error_mask = ceased_indep & ~ceased_over_14 & dec_present
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_452():
error = ErrorDefinition(
code='452',
description='Contradiction between local authority of placement code in the last episode of the previous year and in the first episode of the current year.',
affected_fields=['PL_LA'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
if 'Episodes_last' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes_last = dfs['Episodes_last']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes_last['DECOM'] = pd.to_datetime(episodes_last['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes_min = episodes.groupby('CHILD')['DECOM'].idxmin()
episodes_last_max = episodes_last.groupby('CHILD')['DECOM'].idxmax()
episodes = episodes[episodes.index.isin(episodes_min)]
episodes_last = episodes_last[episodes_last.index.isin(episodes_last_max)]
episodes_merged = episodes.reset_index().merge(episodes_last, how='left', on=['CHILD'],
suffixes=('', '_last'), indicator=True).set_index('index')
in_both_years = episodes_merged['_merge'] == 'both'
same_rne = episodes_merged['RNE'] == episodes_merged['RNE_last']
last_year_open = episodes_merged['DEC_last'].isna()
different_pl_la = episodes_merged['PL_LA'].astype(str) != episodes_merged['PL_LA_last'].astype(str)
error_mask = in_both_years & same_rne & last_year_open & different_pl_la
validation_error_locations = episodes.index[error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_386():
error = ErrorDefinition(
code='386',
description='Reason episode ceased is adopted but child has reached age 18.',
affected_fields=['REC'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
if 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
header['DOB18'] = header['DOB'] + pd.DateOffset(years=18)
episodes_merged = (
episodes
.reset_index()
.merge(header, how='left', on=['CHILD'], suffixes=('', '_header'), indicator=True)
.set_index('index')
.dropna(subset=['DOB18', 'DEC'])
)
ceased_adopted = episodes_merged['REC'].str.upper().astype(str).isin(['E11', 'E12'])
ceased_under_18 = episodes_merged['DOB18'] > episodes_merged['DEC']
error_mask = ceased_adopted & ~ceased_under_18
error_locations = episodes_merged.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_363():
error = ErrorDefinition(
code='363',
description='Child assessment order (CAO) lasted longer than 7 days allowed in the Children Act 1989.',
affected_fields=['LS', 'DECOM', 'DEC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
collection_end_str = dfs['metadata']['collection_end']
L2_eps = episodes[episodes['LS'] == 'L3'].copy()
L2_eps['original_index'] = L2_eps.index
L2_eps = L2_eps[L2_eps['DECOM'].notna()]
L2_eps.loc[L2_eps['DEC'].isna(), 'DEC'] = collection_end_str
L2_eps['DECOM'] = pd.to_datetime(L2_eps['DECOM'], format='%d/%m/%Y', errors='coerce')
L2_eps = L2_eps.dropna(subset=['DECOM'])
L2_eps['DEC'] = pd.to_datetime(L2_eps['DEC'], format='%d/%m/%Y', errors='coerce')
L2_eps = L2_eps.sort_values(['CHILD', 'DECOM'])
L2_eps['index'] = pd.RangeIndex(0, len(L2_eps))
L2_eps['index+1'] = L2_eps['index'] + 1
L2_eps = L2_eps.merge(L2_eps, left_on='index', right_on='index+1',
how='left', suffixes=[None, '_prev'])
L2_eps = L2_eps[['original_index', 'DECOM', 'DEC', 'DEC_prev', 'CHILD', 'CHILD_prev', 'LS']]
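# group episodes into contiguous periods: a new period starts when an episode does not begin
# on or before the previous row's end date, or when the row belongs to a different child;
# a cumulative sum of the new_period flag then gives each contiguous run its own period_id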
L2_eps['new_period'] = (
(L2_eps['DECOM'] > L2_eps['DEC_prev'])
| (L2_eps['CHILD'] != L2_eps['CHILD_prev'])
)
L2_eps['duration'] = (L2_eps['DEC'] - L2_eps['DECOM']).dt.days
L2_eps['period_id'] = L2_eps['new_period'].astype(int).cumsum()
L2_eps['period_duration'] = L2_eps.groupby('period_id')['duration'].transform('sum')
error_mask = L2_eps['period_duration'] > 7
return {'Episodes': L2_eps.loc[error_mask, 'original_index'].to_list()}
return error, _validate
def validate_364():
error = ErrorDefinition(
code='364',
description='Sections 41-46 of Police and Criminal Evidence (PACE; 1984) severely limits ' +
'the time a child can be detained in custody in Local Authority (LA) accommodation.',
affected_fields=['LS', 'DECOM', 'DEC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
collection_end_str = dfs['metadata']['collection_end']
J2_eps = episodes[episodes['LS'] == 'J2'].copy()
J2_eps['original_index'] = J2_eps.index
J2_eps['DECOM'] = pd.to_datetime(J2_eps['DECOM'], format='%d/%m/%Y', errors='coerce')
J2_eps = J2_eps[J2_eps['DECOM'].notna()]
J2_eps.loc[J2_eps['DEC'].isna(), 'DEC'] = collection_end_str
J2_eps['DEC'] = pd.to_datetime(J2_eps['DEC'], format='%d/%m/%Y', errors='coerce')
J2_eps = J2_eps.sort_values(['CHILD', 'DECOM'])
J2_eps['index'] = pd.RangeIndex(0, len(J2_eps))
J2_eps['index_prev'] = J2_eps['index'] + 1
J2_eps = J2_eps.merge(J2_eps, left_on='index', right_on='index_prev',
how='left', suffixes=[None, '_prev'])
J2_eps = J2_eps[['original_index', 'DECOM', 'DEC', 'DEC_prev', 'CHILD', 'CHILD_prev', 'LS']]
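# same contiguous-period grouping as in validate_363, applied to J2 episodes with a 21-day limit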
J2_eps['new_period'] = (
(J2_eps['DECOM'] > J2_eps['DEC_prev'])
| (J2_eps['CHILD'] != J2_eps['CHILD_prev'])
)
J2_eps['duration'] = (J2_eps['DEC'] - J2_eps['DECOM']).dt.days
J2_eps['period_id'] = J2_eps['new_period'].astype(int).cumsum()
J2_eps['period_duration'] = J2_eps.groupby('period_id')['duration'].transform('sum')
error_mask = J2_eps['period_duration'] > 21
return {'Episodes': J2_eps.loc[error_mask, 'original_index'].to_list()}
return error, _validate
def validate_365():
error = ErrorDefinition(
code='365',
description='Any individual short- term respite placement must not exceed 17 days.',
affected_fields=['LS', 'DECOM', 'DEC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
collection_end_str = dfs['metadata']['collection_end']
episodes.loc[episodes['DEC'].isna(), 'DEC'] = collection_end_str
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
over_17_days = episodes['DEC'] > episodes['DECOM'] + pd.DateOffset(days=17)
error_mask = (episodes['LS'] == 'V3') & over_17_days
return {'Episodes': episodes.index[error_mask].to_list()}
return error, _validate
def validate_367():
error = ErrorDefinition(
code='367',
description='The maximum amount of respite care allowable is 75 days in any 12-month period.',
affected_fields=['LS', 'DECOM', 'DEC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
V3_eps = episodes[episodes['LS'] == 'V3'].copy()
V3_eps = V3_eps.dropna(subset=['DECOM']) # missing DECOM should get fixed before looking for this error
collection_start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce')
V3_eps['DECOM_dt'] = pd.to_datetime(V3_eps['DECOM'], format='%d/%m/%Y', errors='coerce')
V3_eps['DEC_dt'] = pd.to_datetime(V3_eps['DEC'], format='%d/%m/%Y', errors='coerce')
# truncate episode start/end dates to collection start/end respectively
V3_eps.loc[V3_eps['DEC'].isna() | (V3_eps['DEC_dt'] > collection_end), 'DEC_dt'] = collection_end
V3_eps.loc[V3_eps['DECOM_dt'] < collection_start, 'DECOM_dt'] = collection_start
V3_eps['duration'] = (V3_eps['DEC_dt'] - V3_eps['DECOM_dt']).dt.days
V3_eps = V3_eps[V3_eps['duration'] > 0]
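# total the within-year V3 durations per child; more than 75 days of respite in the year is an error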
V3_eps['year_total_duration'] = V3_eps.groupby('CHILD')['duration'].transform('sum')
error_mask = V3_eps['year_total_duration'] > 75
return {'Episodes': V3_eps.index[error_mask].to_list()}
return error, _validate
def validate_440():
error = ErrorDefinition(
code='440',
description='Participation method indicates child was under 4 years old at the time of the review, but date of birth and review date indicates the child was 4 years old or over.',
affected_fields=['DOB', 'REVIEW', 'REVIEW_CODE'],
)
def _validate(dfs):
if 'Reviews' not in dfs:
return {}
else:
reviews = dfs['Reviews']
reviews['DOB'] = pd.to_datetime(reviews['DOB'], format='%d/%m/%Y', errors='coerce')
reviews['REVIEW'] = pd.to_datetime(reviews['REVIEW'], format='%d/%m/%Y', errors='coerce')
mask = reviews['REVIEW_CODE'].eq('PN0') & (
reviews['REVIEW'] > reviews['DOB'] + pd.offsets.DateOffset(years=4))
validation_error_mask = mask
validation_error_locations = reviews.index[validation_error_mask]
return {'Reviews': validation_error_locations.tolist()}
return error, _validate
def validate_445():
error = ErrorDefinition(
code='445',
description='D1 is not a valid code for episodes starting after December 2005.',
affected_fields=['LS', 'DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
max_decom_allowed = pd.to_datetime('31/12/2005', format='%d/%m/%Y', errors='coerce')
mask = episodes['LS'].eq('D1') & (episodes['DECOM'] > max_decom_allowed)
validation_error_mask = mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_446():
error = ErrorDefinition(
code='446',
description='E1 is not a valid code for episodes starting before December 2005.',
affected_fields=['LS', 'DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
min_decom_allowed = pd.to_datetime('01/12/2005', format='%d/%m/%Y', errors='coerce')
mask = episodes['LS'].eq('E1') & (episodes['DECOM'] < min_decom_allowed)
validation_error_mask = mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_208():
error = ErrorDefinition(
code='208',
description='Unique Pupil Number (UPN) for the current year disagrees with the Unique Pupil Number (UPN) already recorded for this child.',
affected_fields=['UPN'],
)
def _validate(dfs):
if 'Header' not in dfs or 'Header_last' not in dfs:
return {}
else:
header = dfs['Header']
header_last = dfs['Header_last']
header_merged = header.reset_index().merge(header_last, how='left', on=['CHILD'], suffixes=('', '_last'),
indicator=True).set_index('index')
in_both_years = header_merged['_merge'] == 'both'
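# a UPN change is only accepted where last year's value was UN1 and this year's value is one of
# the other unknown codes (UN2-UN6); any other difference between years is flagged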
upn_is_different = header_merged['UPN'].str.upper().astype(str) != header_merged[
'UPN_last'].str.upper().astype(str)
upn_not_recorded = header_merged['UPN'].str.upper().astype(str).isin(['UN2', 'UN3', 'UN4', 'UN5', 'UN6']) & \
header_merged['UPN_last'].str.upper().astype(str).isin(['UN1'])
error_mask = in_both_years & upn_is_different & ~upn_not_recorded
error_locations = header.index[error_mask]
return {'Header': error_locations.to_list()}
return error, _validate
def validate_204():
error = ErrorDefinition(
code='204',
description='Ethnic origin code disagrees with the ethnic origin already recorded for this child.',
affected_fields=['ETHNIC'],
)
def _validate(dfs):
if 'Header' not in dfs or 'Header_last' not in dfs:
return {}
else:
header = dfs['Header']
header_last = dfs['Header_last']
header_merged = header.reset_index().merge(header_last, how='left', on=['CHILD'], suffixes=('', '_last'),
indicator=True).set_index('index')
in_both_years = header_merged['_merge'] == 'both'
ethnic_is_different = header_merged['ETHNIC'].astype(str).str.upper() != header_merged[
'ETHNIC_last'].astype(str).str.upper()
error_mask = in_both_years & ethnic_is_different
error_locations = header.index[error_mask]
return {'Header': error_locations.to_list()}
return error, _validate
def validate_203():
error = ErrorDefinition(
code='203',
description='Date of birth disagrees with the date of birth already recorded for this child.',
affected_fields=['DOB'],
)
def _validate(dfs):
if 'Header' not in dfs or 'Header_last' not in dfs:
return {}
else:
header = dfs['Header']
header_last = dfs['Header_last']
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
header_last['DOB'] = pd.to_datetime(header_last['DOB'], format='%d/%m/%Y', errors='coerce')
header_merged = header.reset_index().merge(header_last, how='left', on=['CHILD'], suffixes=('', '_last'),
indicator=True).set_index('index')
in_both_years = header_merged['_merge'] == 'both'
dob_is_different = header_merged['DOB'].astype(str) != header_merged['DOB_last'].astype(str)
error_mask = in_both_years & dob_is_different
error_locations = header.index[error_mask]
return {'Header': error_locations.to_list()}
return error, _validate
def validate_530():
error = ErrorDefinition(
code='530',
description="A placement provider code of PR4 cannot be associated with placement P1.",
affected_fields=['PLACE', 'PLACE_PROVIDER'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
mask = episodes['PLACE'].eq('P1') & episodes['PLACE_PROVIDER'].eq('PR4')
validation_error_mask = mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_571():
error = ErrorDefinition(
code='571',
description='The date that the child ceased to be missing or away from placement without authorisation is before the start or after the end of the collection year.',
affected_fields=['MIS_END'],
)
def _validate(dfs):
if 'Missing' not in dfs:
return {}
else:
missing = dfs['Missing']
collection_start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce')
missing['fMIS_END'] = pd.to_datetime(missing['MIS_END'], format='%d/%m/%Y', errors='coerce')
end_date_before_year = missing['fMIS_END'] < collection_start
end_date_after_year = missing['fMIS_END'] > collection_end
error_mask = end_date_before_year | end_date_after_year
error_locations = missing.index[error_mask]
return {'Missing': error_locations.to_list()}
return error, _validate
def validate_1005():
error = ErrorDefinition(
code='1005',
description='The end date of the missing episode or episode that the child was away from placement without authorisation is not a valid date.',
affected_fields=['MIS_END'],
)
def _validate(dfs):
if 'Missing' not in dfs:
return {}
else:
missing = dfs['Missing']
missing['fMIS_END'] = pd.to_datetime(missing['MIS_END'], format='%d/%m/%Y', errors='coerce')
missing_end_date = missing['MIS_END'].isna()
invalid_end_date = missing['fMIS_END'].isna()
error_mask = ~missing_end_date & invalid_end_date
error_locations = missing.index[error_mask]
return {'Missing': error_locations.to_list()}
return error, _validate
def validate_1004():
error = ErrorDefinition(
code='1004',
description='The start date of the missing episode or episode that the child was away from placement without authorisation is not a valid date.',
affected_fields=['MIS_START'],
)
def _validate(dfs):
if 'Missing' not in dfs:
return {}
else:
missing = dfs['Missing']
missing['fMIS_START'] = pd.to_datetime(missing['MIS_START'], format='%d/%m/%Y', errors='coerce')
missing_start_date = missing['MIS_START'].isna()
invalid_start_date = missing['fMIS_START'].isna()
error_mask = missing_start_date | invalid_start_date
error_locations = missing.index[error_mask]
return {'Missing': error_locations.to_list()}
return error, _validate
def validate_202():
error = ErrorDefinition(
code='202',
description='The gender code conflicts with the gender already recorded for this child.',
affected_fields=['SEX'],
)
def _validate(dfs):
if 'Header' not in dfs or 'Header_last' not in dfs:
return {}
else:
header = dfs['Header']
header_last = dfs['Header_last']
header_merged = header.reset_index().merge(header_last, how='left', on=['CHILD'], suffixes=('', '_last'),
indicator=True).set_index('index')
in_both_years = header_merged['_merge'] == 'both'
sex_is_different = header_merged['SEX'].astype(str) != header_merged['SEX_last'].astype(str)
error_mask = in_both_years & sex_is_different
error_locations = header.index[error_mask]
return {'Header': error_locations.to_list()}
return error, _validate
def validate_621():
error = ErrorDefinition(
code='621',
description="Mother’s field has been completed but date of birth shows that the mother is younger than her child.",
affected_fields=['DOB', 'MC_DOB'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
else:
header = dfs['Header']
header['MC_DOB'] = pd.to_datetime(header['MC_DOB'], format='%d/%m/%Y', errors='coerce')
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
mask = (header['MC_DOB'] > header['DOB']) | header['MC_DOB'].isna()
validation_error_mask = ~mask
validation_error_locations = header.index[validation_error_mask]
return {'Header': validation_error_locations.tolist()}
return error, _validate
def validate_556():
error = ErrorDefinition(
code='556',
description='Date of decision that the child should be placed for adoption should be on or prior to the date that the freeing order was granted.',
affected_fields=['DATE_PLACED', 'DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'PlacedAdoption' not in dfs:
return {}
else:
episodes = dfs['Episodes']
placedAdoptions = dfs['PlacedAdoption']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
placedAdoptions['DATE_PLACED'] = pd.to_datetime(placedAdoptions['DATE_PLACED'], format='%d/%m/%Y',
errors='coerce')
episodes = episodes.reset_index()
D1Episodes = episodes[episodes['LS'] == 'D1']
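# restrict to D1 (freeing order) episodes: the decision date must be on or before the DECOM of
# the freeing-order episode, so rows where DATE_PLACED is after DECOM are flagged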
merged = D1Episodes.reset_index().merge(placedAdoptions, how='left', on='CHILD').set_index('index')
episodes_with_errors = merged[merged['DATE_PLACED'] > merged['DECOM']]
error_mask = episodes.index.isin(episodes_with_errors.index)
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_393():
error = ErrorDefinition(
code='393',
description='Child is looked after but mother field is not completed.',
affected_fields=['MOTHER'],
)
def _validate(dfs):
if 'Header' not in dfs or 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
header_female = header[header['SEX'].astype(str) == '2']
applicable_episodes = episodes[~episodes['LS'].str.upper().isin(['V3', 'V4'])]
error_mask = header_female['CHILD'].isin(applicable_episodes['CHILD']) & header_female['MOTHER'].isna()
error_locations = header_female.index[error_mask]
return {'Header': error_locations.to_list()}
return error, _validate
def validate_NoE():
error = ErrorDefinition(
code='NoE',
description='This child has no episodes loaded for previous year even though child started to be looked after before this current year.',
affected_fields=['DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Episodes_last' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes_last = dfs['Episodes_last']
episodes_last['DECOM'] = pd.to_datetime(episodes_last['DECOM'], format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
episodes_before_year = episodes[episodes['DECOM'] < collection_start]
episodes_merged = episodes_before_year.reset_index().merge(episodes_last, how='left', on=['CHILD'],
indicator=True).set_index('index')
episodes_not_matched = episodes_merged[episodes_merged['_merge'] == 'left_only']
error_mask = episodes.index.isin(episodes_not_matched.index)
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_356():
error = ErrorDefinition(
code='356',
description='The date the episode ceased is before the date the same episode started.',
affected_fields=['DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
error_mask = episodes['DEC'].notna() & (episodes['DEC'] < episodes['DECOM'])
return {'Episodes': episodes.index[error_mask].to_list()}
return error, _validate
def validate_611():
error = ErrorDefinition(
code='611',
description="Date of birth field is blank, but child is a mother.",
affected_fields=['MOTHER', 'MC_DOB'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
else:
header = dfs['Header']
validation_error_mask = header['MOTHER'].astype(str).isin(['1']) & header['MC_DOB'].isna()
validation_error_locations = header.index[validation_error_mask]
return {'Header': validation_error_locations.tolist()}
return error, _validate
def validate_1009():
error = ErrorDefinition(
code='1009',
description='Reason for placement change is not a valid code.',
affected_fields=['REASON_PLACE_CHANGE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
code_list = [
'CARPL',
'CLOSE',
'ALLEG',
'STAND',
'APPRR',
'CREQB',
'CREQO',
'CHILD',
'LAREQ',
'PLACE',
'CUSTOD',
'OTHER'
]
mask = episodes['REASON_PLACE_CHANGE'].isin(code_list) | episodes['REASON_PLACE_CHANGE'].isna()
validation_error_mask = ~mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_1006():
error = ErrorDefinition(
code='1006',
description='Missing type invalid.',
affected_fields=['MISSING'],
)
def _validate(dfs):
if 'Missing' not in dfs:
return {}
missing_from_care = dfs['Missing']
code_list = ['M', 'A']
mask = missing_from_care['MISSING'].isin(code_list) | missing_from_care['MISSING'].isna()
validation_error_mask = ~mask
validation_error_locations = missing_from_care.index[validation_error_mask]
return {'Missing': validation_error_locations.tolist()}
return error, _validate
def validate_631():
error = ErrorDefinition(
code='631',
description='Previous permanence option not a valid value.',
affected_fields=['PREV_PERM'],
)
def _validate(dfs):
if 'PrevPerm' not in dfs:
return {}
previous_permanence = dfs['PrevPerm']
code_list = ['P1', 'P2', 'P3', 'P4', 'Z1']
mask = previous_permanence['PREV_PERM'].isin(code_list) | previous_permanence['PREV_PERM'].isna()
validation_error_mask = ~mask
validation_error_locations = previous_permanence.index[validation_error_mask]
return {'PrevPerm': validation_error_locations.tolist()}
return error, _validate
def validate_196():
error = ErrorDefinition(
code='196',
description='Strengths and Difficulties (SDQ) reason is not a valid code.',
affected_fields=['SDQ_REASON'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
oc2 = dfs['OC2']
code_list = ['SDQ1', 'SDQ2', 'SDQ3', 'SDQ4', 'SDQ5']
mask = oc2['SDQ_REASON'].isin(code_list) | oc2['SDQ_REASON'].isna()
validation_error_mask = ~mask
validation_error_locations = oc2.index[validation_error_mask]
return {'OC2': validation_error_locations.tolist()}
return error, _validate
def validate_177():
error = ErrorDefinition(
code='177',
description='The legal status of adopter(s) code is not a valid code.',
affected_fields=['LS_ADOPTR'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
adoptions = dfs['AD1']
code_list = ['L0', 'L11', 'L12', 'L2', 'L3', 'L4']
mask = adoptions['LS_ADOPTR'].isin(code_list) | adoptions['LS_ADOPTR'].isna()
validation_error_mask = ~mask
validation_error_locations = adoptions.index[validation_error_mask]
return {'AD1': validation_error_locations.tolist()}
return error, _validate
def validate_176():
error = ErrorDefinition(
code='176',
description='The gender of adopter(s) at the date of adoption code is not a valid code.',
affected_fields=['SEX_ADOPTR'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
adoptions = dfs['AD1']
code_list = ['M1', 'F1', 'MM', 'FF', 'MF']
mask = adoptions['SEX_ADOPTR'].isin(code_list) | adoptions['SEX_ADOPTR'].isna()
validation_error_mask = ~mask
validation_error_locations = adoptions.index[validation_error_mask]
return {'AD1': validation_error_locations.tolist()}
return error, _validate
def validate_175():
error = ErrorDefinition(
code='175',
description='The number of adopter(s) code is not a valid code.',
affected_fields=['NB_ADOPTR'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
adoptions = dfs['AD1']
code_list = ['1', '2']
mask = adoptions['NB_ADOPTR'].astype(str).isin(code_list) | adoptions['NB_ADOPTR'].isna()
validation_error_mask = ~mask
validation_error_locations = adoptions.index[validation_error_mask]
return {'AD1': validation_error_locations.tolist()}
return error, _validate
def validate_132():
error = ErrorDefinition(
code='132',
description='Data entry for activity after leaving care is invalid.',
affected_fields=['ACTIV'],
)
def _validate(dfs):
if 'OC3' not in dfs:
return {}
care_leavers = dfs['OC3']
code_list = [
'F1',
'P1',
'F2',
'P2',
'F4',
'P4',
'F5',
'P5',
'G4',
'G5',
'G6',
'0'
]
mask = care_leavers['ACTIV'].astype(str).isin(code_list) | care_leavers['ACTIV'].isna()
validation_error_mask = ~mask
validation_error_locations = care_leavers.index[validation_error_mask]
return {'OC3': validation_error_locations.tolist()}
return error, _validate
def validate_131():
error = ErrorDefinition(
code='131',
description='Data entry for being in touch after leaving care is invalid.',
affected_fields=['IN_TOUCH'],
)
def _validate(dfs):
if 'OC3' not in dfs:
return {}
care_leavers = dfs['OC3']
code_list = [
'YES',
'NO',
'DIED',
'REFU',
'NREQ',
'RHOM'
]
mask = care_leavers['IN_TOUCH'].isin(code_list) | care_leavers['IN_TOUCH'].isna()
validation_error_mask = ~mask
validation_error_locations = care_leavers.index[validation_error_mask]
return {'OC3': validation_error_locations.tolist()}
return error, _validate
def validate_120():
error = ErrorDefinition(
code='120',
description='The reason for the reversal of the decision that the child should be placed for adoption code is not valid.',
affected_fields=['REASON_PLACED_CEASED'],
)
def _validate(dfs):
if 'PlacedAdoption' not in dfs:
return {}
placed_adoptions = dfs['PlacedAdoption']
code_list = ['RD1', 'RD2', 'RD3', 'RD4']
mask = placed_adoptions['REASON_PLACED_CEASED'].isin(code_list) | placed_adoptions[
'REASON_PLACED_CEASED'].isna()
validation_error_mask = ~mask
validation_error_locations = placed_adoptions.index[validation_error_mask]
return {'PlacedAdoption': validation_error_locations.tolist()}
return error, _validate
def validate_114():
error = ErrorDefinition(
code='114',
description='Data entry to record the status of former carer(s) of an adopted child is invalid.',
affected_fields=['FOSTER_CARE'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
adoptions = dfs['AD1']
code_list = ['0', '1']
mask = adoptions['FOSTER_CARE'].astype(str).isin(code_list) | adoptions['FOSTER_CARE'].isna()
validation_error_mask = ~mask
validation_error_locations = adoptions.index[validation_error_mask]
return {'AD1': validation_error_locations.tolist()}
return error, _validate
def validate_178():
error = ErrorDefinition(
code='178',
description='Placement provider code is not a valid code.',
affected_fields=['PLACE_PROVIDER'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
code_list_placement_provider = ['PR0', 'PR1', 'PR2', 'PR3', 'PR4', 'PR5']
code_list_placement_with_no_provider = ['T0', 'T1', 'T2', 'T3', 'T4', 'Z1']
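# the mask accepts a recognised provider code on a placement that requires one, or a blank
# PLACE_PROVIDER; blank values are not flagged by this rule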
place_provider_needed_and_correct = episodes['PLACE_PROVIDER'].isin(code_list_placement_provider) & ~episodes[
'PLACE'].isin(code_list_placement_with_no_provider)
place_provider_not_provided = episodes['PLACE_PROVIDER'].isna()
place_provider_not_needed = episodes['PLACE_PROVIDER'].isna() & episodes['PLACE'].isin(
code_list_placement_with_no_provider)
mask = place_provider_needed_and_correct | place_provider_not_provided | place_provider_not_needed
validation_error_mask = ~mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_103():
error = ErrorDefinition(
code='103',
description='The ethnicity code is either not valid or has not been entered.',
affected_fields=['ETHNIC'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
header = dfs['Header']
code_list = [
'WBRI',
'WIRI',
'WOTH',
'WIRT',
'WROM',
'MWBC',
'MWBA',
'MWAS',
'MOTH',
'AIND',
'APKN',
'ABAN',
'AOTH',
'BCRB',
'BAFR',
'BOTH',
'CHNE',
'OOTH',
'REFU',
'NOBT'
]
mask = header['ETHNIC'].isin(code_list)
validation_error_mask = ~mask
validation_error_locations = header.index[validation_error_mask]
return {'Header': validation_error_locations.tolist()}
return error, _validate
def validate_143():
error = ErrorDefinition(
code='143',
description='The reason for new episode code is not a valid code.',
affected_fields=['RNE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
code_list = ['S', 'P', 'L', 'T', 'U', 'B']
mask = episodes['RNE'].isin(code_list) | episodes['RNE'].isna()
validation_error_mask = ~mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_144():
error = ErrorDefinition(
code='144',
description='The legal status code is not a valid code.',
affected_fields=['LS'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
code_list = [
'C1',
'C2',
'D1',
'E1',
'V2',
'V3',
'V4',
'J1',
'J2',
'J3',
'L1',
'L2',
'L3'
]
mask = episodes['LS'].isin(code_list) | episodes['LS'].isna()
validation_error_mask = ~mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_145():
error = ErrorDefinition(
code='145',
description='Category of need code is not a valid code.',
affected_fields=['CIN'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
code_list = [
'N1',
'N2',
'N3',
'N4',
'N5',
'N6',
'N7',
'N8',
]
mask = episodes['CIN'].isin(code_list) | episodes['CIN'].isna()
validation_error_mask = ~mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_146():
error = ErrorDefinition(
code='146',
description='Placement type code is not a valid code.',
affected_fields=['PLACE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
code_list = [
'A3',
'A4',
'A5',
'A6',
'H5',
'K1',
'K2',
'P1',
'P2',
'P3',
'R1',
'R2',
'R3',
'R5',
'S1',
'T0',
'T1',
'T2',
'T3',
'T4',
'U1',
'U2',
'U3',
'U4',
'U5',
'U6',
'Z1'
]
mask = episodes['PLACE'].isin(code_list) | episodes['PLACE'].isna()
validation_error_mask = ~mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_149():
error = ErrorDefinition(
code='149',
description='Reason episode ceased code is not valid.',
affected_fields=['REC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
code_list = [
'E11',
'E12',
'E2',
'E3',
'E4A',
'E4B',
'E13',
'E41',
'E45',
'E46',
'E47',
'E48',
'E5',
'E6',
'E7',
'E8',
'E9',
'E14',
'E15',
'E16',
'E17',
'X1'
]
mask = episodes['REC'].isin(code_list) | episodes['REC'].isna()
validation_error_mask = ~mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_167():
error = ErrorDefinition(
code='167',
description='Data entry for participation is invalid or blank.',
affected_fields=['REVIEW_CODE'],
)
def _validate(dfs):
if 'Reviews' not in dfs:
return {}
review = dfs['Reviews']
code_list = ['PN0', 'PN1', 'PN2', 'PN3', 'PN4', 'PN5', 'PN6', 'PN7']
mask = (review['REVIEW'].notna() & review['REVIEW_CODE'].isin(code_list)) | (
review['REVIEW'].isna() & review['REVIEW_CODE'].isna())
validation_error_mask = ~mask
validation_error_locations = review.index[validation_error_mask]
return {'Reviews': validation_error_locations.tolist()}
return error, _validate
def validate_101():
error = ErrorDefinition(
code='101',
description='Gender code is not valid.',
affected_fields=['SEX'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
header = dfs['Header']
code_list = ['1', '2']
mask = header['SEX'].astype(str).isin(code_list)
validation_error_mask = ~mask
validation_error_locations = header.index[validation_error_mask]
return {'Header': validation_error_locations.tolist()}
return error, _validate
def validate_141():
error = ErrorDefinition(
code='141',
description='Date episode began is not a valid date.',
affected_fields=['DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
mask = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce').notna()
na_location = episodes['DECOM'].isna()
validation_error_mask = ~mask & ~na_location
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_147():
error = ErrorDefinition(
code='147',
description='Date episode ceased is not a valid date.',
affected_fields=['DEC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
mask = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce').notna()
na_location = episodes['DEC'].isna()
validation_error_mask = ~mask & ~na_location
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_171():
error = ErrorDefinition(
code='171',
description="Date of birth of mother's child is not a valid date.",
affected_fields=['MC_DOB'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
else:
header = dfs['Header']
mask = pd.to_datetime(header['MC_DOB'], format='%d/%m/%Y', errors='coerce').notna()
na_location = header['MC_DOB'].isna()
validation_error_mask = ~mask & ~na_location
validation_error_locations = header.index[validation_error_mask]
return {'Header': validation_error_locations.tolist()}
return error, _validate
def validate_102():
error = ErrorDefinition(
code='102',
description='Date of birth is not a valid date.',
affected_fields=['DOB'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
else:
header = dfs['Header']
mask = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce').notna()
validation_error_mask = ~mask
validation_error_locations = header.index[validation_error_mask]
return {'Header': validation_error_locations.tolist()}
return error, _validate
def validate_112():
error = ErrorDefinition(
code='112',
description='Date should be placed for adoption is not a valid date.',
affected_fields=['DATE_INT'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
ad1 = dfs['AD1']
mask = pd.to_datetime(ad1['DATE_INT'], format='%d/%m/%Y', errors='coerce').notna()
na_location = ad1['DATE_INT'].isna()
validation_error_mask = ~mask & ~na_location
validation_error_locations = ad1.index[validation_error_mask]
return {'AD1': validation_error_locations.tolist()}
return error, _validate
def validate_115():
error = ErrorDefinition(
code='115',
description="Date of Local Authority's (LA) decision that a child should be placed for adoption is not a valid date.",
affected_fields=['DATE_PLACED'],
)
def _validate(dfs):
if 'PlacedAdoption' not in dfs:
return {}
else:
adopt = dfs['PlacedAdoption']
mask = pd.to_datetime(adopt['DATE_PLACED'], format='%d/%m/%Y', errors='coerce').notna()
na_location = adopt['DATE_PLACED'].isna()
validation_error_mask = ~mask & ~na_location
validation_error_locations = adopt.index[validation_error_mask]
return {'PlacedAdoption': validation_error_locations.tolist()}
return error, _validate
def validate_116():
error = ErrorDefinition(
code='116',
description="Date of Local Authority's (LA) decision that a child should no longer be placed for adoption is not a valid date.",
affected_fields=['DATE_PLACED_CEASED'],
)
def _validate(dfs):
if 'PlacedAdoption' not in dfs:
return {}
else:
adopt = dfs['PlacedAdoption']
mask = pd.to_datetime(adopt['DATE_PLACED_CEASED'], format='%d/%m/%Y', errors='coerce').notna()
na_location = adopt['DATE_PLACED_CEASED'].isna()
validation_error_mask = ~mask & ~na_location
validation_error_locations = adopt.index[validation_error_mask]
return {'PlacedAdoption': validation_error_locations.tolist()}
return error, _validate
def validate_392c():
error = ErrorDefinition(
code='392c',
description='Postcode(s) provided are invalid.',
affected_fields=['HOME_POST', 'PL_POST'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
home_provided = episodes['HOME_POST'].notna()
home_details = merge_postcodes(episodes, "HOME_POST")
home_valid = home_details['pcd'].notna()
pl_provided = episodes['PL_POST'].notna()
pl_details = merge_postcodes(episodes, "PL_POST")
pl_valid = pl_details['pcd'].notna()
error_mask = (home_provided & ~home_valid) | (pl_provided & ~pl_valid)
return {'Episodes': episodes.index[error_mask].tolist()}
return error, _validate
def validate_213():
error = ErrorDefinition(
code='213',
description='Placement provider information not required.',
affected_fields=['PLACE_PROVIDER'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
mask = df['PLACE'].isin(['T0', 'T1', 'T2', 'T3', 'T4', 'Z1']) & df['PLACE_PROVIDER'].notna()
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_168():
error = ErrorDefinition(
code='168',
description='Unique Pupil Number (UPN) is not valid. If unknown, default codes should be UN1, UN2, UN3, UN4 or UN5.',
affected_fields=['UPN'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
else:
df = dfs['Header']
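# a valid UPN is one letter (excluding I, O and S) followed by either 12 digits or 11 digits and
# a final letter; the UN1-UN5 codes for an unknown UPN are also accepted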
mask = df['UPN'].str.match(r'(^((?![IOS])[A-Z]){1}(\d{12}|\d{11}[A-Z]{1})$)|^(UN[1-5])$', na=False)
mask = ~mask
return {'Header': df.index[mask].tolist()}
return error, _validate
def validate_388():
error = ErrorDefinition(
code='388',
description='Reason episode ceased is coded new episode begins, but there is no continuation episode.',
affected_fields=['REC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
df['DECOM'] = pd.to_datetime(df['DECOM'], format='%d/%m/%Y', errors='coerce')
df['DEC'] = pd.to_datetime(df['DEC'], format='%d/%m/%Y', errors='coerce')
df['DECOM'] = df['DECOM'].fillna(pd.to_datetime('01/01/1901', format='%d/%m/%Y'))  # placeholder for episodes with no start date
df = df.sort_values(['CHILD', 'DECOM'])
df['DECOM_NEXT_EPISODE'] = df.groupby(['CHILD'])['DECOM'].shift(-1)
# The max DECOM for each child is also the one with no next episode
# And we also add the skipna option
# grouped_decom_by_child = df.groupby(['CHILD'])['DECOM'].idxmax(skipna=True)
no_next = df.DECOM_NEXT_EPISODE.isna() & df.CHILD.notna()
# Dataframe with the maximum DECOM removed
max_decom_removed = df[~no_next]
# Dataframe with the maximum DECOM only
max_decom_only = df[no_next]
# Case 1: If reason episode ceased is coded X1 there must be a subsequent episode
# starting on the same day.
case1 = max_decom_removed[(max_decom_removed['REC'] == 'X1') &
(max_decom_removed['DEC'].notna()) &
(max_decom_removed['DECOM_NEXT_EPISODE'].notna()) &
(max_decom_removed['DEC'] != max_decom_removed['DECOM_NEXT_EPISODE'])]
# Case 2: If an episode ends but the child continues to be looked after, a new
# episode should start on the same day.The reason episode ceased code of
# the episode which ends must be X1.
case2 = max_decom_removed[(max_decom_removed['REC'] != 'X1') &
(max_decom_removed['REC'].notna()) &
(max_decom_removed['DEC'].notna()) &
(max_decom_removed['DECOM_NEXT_EPISODE'].notna()) &
(max_decom_removed['DEC'] == max_decom_removed['DECOM_NEXT_EPISODE'])]
# Case 3: If a child ceases to be looked after reason episode ceased code X1 must
# not be used.
case3 = max_decom_only[(max_decom_only['DEC'].notna()) &
(max_decom_only['REC'] == 'X1')]
mask_case1 = case1.index.tolist()
mask_case2 = case2.index.tolist()
mask_case3 = case3.index.tolist()
mask = mask_case1 + mask_case2 + mask_case3
mask.sort()
return {'Episodes': mask}
return error, _validate
def validate_113():
error = ErrorDefinition(
code='113',
description='Date matching child and adopter(s) is not a valid date.',
affected_fields=['DATE_MATCH'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
ad1 = dfs['AD1']
mask = pd.to_datetime(ad1['DATE_MATCH'], format='%d/%m/%Y', errors='coerce').notna()
na_location = ad1['DATE_MATCH'].isna()
validation_error_mask = ~mask & ~na_location
validation_error_locations = ad1.index[validation_error_mask]
return {'AD1': validation_error_locations.tolist()}
return error, _validate
def validate_134():
error = ErrorDefinition(
code='134',
description='Data on adoption should not be entered for the OC3 cohort.',
affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM', 'DATE_INT', 'DATE_MATCH', 'FOSTER_CARE', 'NB_ADOPTR',
'SEX_ADOPTR', 'LS_ADOPTR'],
)
def _validate(dfs):
if 'OC3' not in dfs or 'AD1' not in dfs:
return {}
else:
oc3 = dfs['OC3']
ad1 = dfs['AD1']
ad1['ad1_index'] = ad1.index
all_data = ad1.merge(oc3, how='left', on='CHILD')
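# a row is in error when the child has at least one OC3 field completed and at least one AD1
# adoption field completed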
na_oc3_data = (
all_data['IN_TOUCH'].isna() &
all_data['ACTIV'].isna() &
all_data['ACCOM'].isna()
)
na_ad1_data = (
all_data['DATE_INT'].isna() &
all_data['DATE_MATCH'].isna() &
all_data['FOSTER_CARE'].isna() &
all_data['NB_ADOPTR'].isna() &
all_data['SEX_ADOPTR'].isna() &
all_data['LS_ADOPTR'].isna()
)
validation_error = ~na_oc3_data & ~na_ad1_data
validation_error_locations = all_data.loc[validation_error, 'ad1_index'].unique()
return {'AD1': validation_error_locations.tolist()}
return error, _validate
def validate_119():
error = ErrorDefinition(
code='119',
description='If the decision is made that a child should no longer be placed for adoption, then the date of this decision and the reason why this decision was made must be completed.',
affected_fields=['REASON_PLACED_CEASED', 'DATE_PLACED_CEASED'],
)
def _validate(dfs):
if 'PlacedAdoption' not in dfs:
return {}
else:
adopt = dfs['PlacedAdoption']
na_placed_ceased = adopt['DATE_PLACED_CEASED'].isna()
na_reason_ceased = adopt['REASON_PLACED_CEASED'].isna()
validation_error = (na_placed_ceased & ~na_reason_ceased) | (~na_placed_ceased & na_reason_ceased)
validation_error_locations = adopt.index[validation_error]
return {'PlacedAdoption': validation_error_locations.tolist()}
return error, _validate
def validate_159():
error = ErrorDefinition(
code='159',
description='If a child has been recorded as not receiving an intervention for their substance misuse problem, then the additional item on whether an intervention was offered should be completed as well.',
affected_fields=['SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
mask1 = oc2['SUBSTANCE_MISUSE'].astype(str) == '1'
mask2 = oc2['INTERVENTION_RECEIVED'].astype(str) == '0'
mask3 = oc2['INTERVENTION_OFFERED'].isna()
validation_error = mask1 & mask2 & mask3
validation_error_locations = oc2.index[validation_error]
return {'OC2': validation_error_locations.tolist()}
return error, _validate
def validate_142():
error = ErrorDefinition(
code='142',
description='A new episode has started, but the previous episode has not ended.',
affected_fields=['DEC', 'REC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
df['DECOM'] = pd.to_datetime(df['DECOM'], format='%d/%m/%Y', errors='coerce')
df['DEC'] = pd.to_datetime(df['DEC'], format='%d/%m/%Y', errors='coerce')
# leave missing DECOM values as NaT: they sort last, so they are never treated as an ended
# episode, and the DECOM.notna() filter below excludes them from the check
last_episodes = df.sort_values('DECOM').reset_index().groupby(['CHILD'])['index'].last()
ended_episodes_df = df.loc[~df.index.isin(last_episodes)]
ended_episodes_df = ended_episodes_df[(ended_episodes_df['DEC'].isna() | ended_episodes_df['REC'].isna()) &
ended_episodes_df['CHILD'].notna() & ended_episodes_df[
'DECOM'].notna()]
mask = ended_episodes_df.index.tolist()
return {'Episodes': mask}
return error, _validate
def validate_148():
error = ErrorDefinition(
code='148',
description='Date episode ceased and reason episode ceased must both be coded, or both left blank.',
affected_fields=['DEC', 'REC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
df['DEC'] = pd.to_datetime(df['DEC'], format='%d/%m/%Y', errors='coerce')
mask = ((df['DEC'].isna()) & (df['REC'].notna())) | ((df['DEC'].notna()) & (df['REC'].isna()))
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_151():
error = ErrorDefinition(
code='151',
description="All data items relating to a child's adoption must be coded or left blank.",
affected_fields=['DATE_INT', 'DATE_MATCH', 'FOSTER_CARE', 'NB_ADOPTR', 'SEX_ADOPTR', 'LS_ADOPTR'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
ad1 = dfs['AD1']
na_date_int = ad1['DATE_INT'].isna()
na_date_match = ad1['DATE_MATCH'].isna()
na_foster_care = ad1['FOSTER_CARE'].isna()
na_nb_adoptr = ad1['NB_ADOPTR'].isna()
na_sex_adoptr = ad1['SEX_ADOPTR'].isna()
na_lsadoptr = ad1['LS_ADOPTR'].isna()
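# either all six adoption fields are completed or all are left blank; partially completed rows are flagged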
ad1_not_null = (
~na_date_int & ~na_date_match & ~na_foster_care & ~na_nb_adoptr & ~na_sex_adoptr & ~na_lsadoptr)
validation_error = (
~na_date_int | ~na_date_match | ~na_foster_care | ~na_nb_adoptr | ~na_sex_adoptr | ~na_lsadoptr) & ~ad1_not_null
validation_error_locations = ad1.index[validation_error]
return {'AD1': validation_error_locations.tolist()}
return error, _validate
def validate_182():
error = ErrorDefinition(
code='182',
description='Data entries on immunisations, teeth checks, health assessments and substance misuse problem identified should be completed or all OC2 fields should be left blank.',
affected_fields=['IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE', 'CONVICTED',
'HEALTH_CHECK', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
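# mask1: any of the four core health items is missing; mask2: none of the remaining OC2 items are
# completed; rows where core items are missing but other OC2 data has been entered are flagged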
mask1 = (
oc2['IMMUNISATIONS'].isna() |
oc2['TEETH_CHECK'].isna() |
oc2['HEALTH_ASSESSMENT'].isna() |
oc2['SUBSTANCE_MISUSE'].isna()
)
mask2 = (
oc2['CONVICTED'].isna() &
oc2['HEALTH_CHECK'].isna() &
oc2['INTERVENTION_RECEIVED'].isna() &
oc2['INTERVENTION_OFFERED'].isna()
)
validation_error = mask1 & ~mask2
validation_error_locations = oc2.index[validation_error]
return {'OC2': validation_error_locations.tolist()}
return error, _validate
def validate_214():
error = ErrorDefinition(
code='214',
description='Placement location information not required.',
affected_fields=['PL_POST', 'URN'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
mask = df['LS'].isin(['V3', 'V4']) & ((df['PL_POST'].notna()) | (df['URN'].notna()))
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_222():
error = ErrorDefinition(
code='222',
description='Ofsted Unique reference number (URN) should not be recorded for this placement type.',
affected_fields=['URN'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
place_code_list = ['H5', 'P1', 'P2', 'P3', 'R1', 'R2', 'R5', 'T0', 'T1', 'T2', 'T3', 'T4', 'Z1']
mask = (df['PLACE'].isin(place_code_list)) & (df['URN'].notna()) & (df['URN'] != 'XXXXXX')
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_366():
error = ErrorDefinition(
code='366',
description='A child cannot change placement during the course of an individual short-term respite break.',
affected_fields=['RNE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
mask = (df['LS'] == 'V3') & (df['RNE'] != 'S')
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_628():
error = ErrorDefinition(
code='628',
description='Motherhood details are not required for care leavers who have not been looked after during the year.',
affected_fields=['MOTHER'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Header' not in dfs or 'OC3' not in dfs:
return {}
else:
hea = dfs['Header']
epi = dfs['Episodes']
oc3 = dfs['OC3']
hea = hea.reset_index()
oc3_no_nulls = oc3[oc3[['IN_TOUCH', 'ACTIV', 'ACCOM']].notna().any(axis=1)]
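# cohort: children in the Header with some OC3 (care leaver) information but no episodes this year;
# the MOTHER field should not be completed for them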
hea_merge_epi = hea.merge(epi, how='left', on='CHILD', indicator=True)
hea_not_in_epi = hea_merge_epi[hea_merge_epi['_merge'] == 'left_only']
cohort_to_check = hea_not_in_epi.merge(oc3_no_nulls, how='inner', on='CHILD')
error_cohort = cohort_to_check[cohort_to_check['MOTHER'].notna()]
error_list = list(set(error_cohort['index'].to_list()))
error_list.sort()
return {'Header': error_list}
return error, _validate
def validate_164():
error = ErrorDefinition(
code='164',
description='Distance is not valid. Please check a valid postcode has been entered.',
affected_fields=['PL_DISTANCE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
is_short_term = df['LS'].isin(['V3', 'V4'])
distance = pd.to_numeric(df['PL_DISTANCE'], errors='coerce')
# Use a bit of tolerance in these bounds
distance_valid = distance.gt(-0.2) & distance.lt(1001.0)
mask = ~is_short_term & ~distance_valid
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_169():
error = ErrorDefinition(
code='169',
description='Local Authority (LA) of placement is not valid or is missing. Please check a valid postcode has been entered.',
affected_fields=['PL_LA'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
is_short_term = df['LS'].isin(['V3', 'V4'])
# Because PL_LA is derived, it will always be valid if present
mask = ~is_short_term & df['PL_LA'].isna()
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_179():
error = ErrorDefinition(
code='179',
description='Placement location code is not a valid code.',
affected_fields=['PL_LOCATION'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
is_short_term = df['LS'].isin(['V3', 'V4'])
# Because PL_LOCATION is derived, it will always be valid if present
mask = ~is_short_term & df['PL_LOCATION'].isna()
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_1015():
error = ErrorDefinition(
code='1015',
description='Placement provider is own provision but child not placed in own LA.',
affected_fields=['PL_LA'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
local_authority = dfs['metadata']['localAuthority']
placement_fostering_or_adoption = df['PLACE'].isin([
'A3', 'A4', 'A5', 'A6', 'U1', 'U2', 'U3', 'U4', 'U5', 'U6',
])
own_provision = df['PLACE_PROVIDER'].eq('PR1')
is_short_term = df['LS'].isin(['V3', 'V4'])
is_pl_la = df['PL_LA'].eq(local_authority)
checked_episodes = ~placement_fostering_or_adoption & ~is_short_term & own_provision
checked_episodes = checked_episodes & df['LS'].notna() & df['PLACE'].notna()
mask = checked_episodes & ~is_pl_la
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_411():
error = ErrorDefinition(
code='411',
description='Placement location code disagrees with LA of placement.',
affected_fields=['PL_LOCATION'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
local_authority = dfs['metadata']['localAuthority']
mask = df['PL_LOCATION'].eq('IN') & df['PL_LA'].ne(local_authority)
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_420():
error = ErrorDefinition(
code='420',
description='LA of placement completed but child is looked after under legal status V3 or V4.',
affected_fields=['PL_LA'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
is_short_term = df['LS'].isin(['V3', 'V4'])
mask = is_short_term & df['PL_LA'].notna()
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_355():
error = ErrorDefinition(
code='355',
description='Episode appears to have lasted for less than 24 hours',
affected_fields=['DECOM', 'DEC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
mask = df['DEC'].notna() & (df['DECOM'].astype(str) == df['DEC'].astype(str))
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_586():
error = ErrorDefinition(
code='586',
description='Dates of missing periods are before child’s date of birth.',
affected_fields=['MIS_START'],
)
def _validate(dfs):
if 'Missing' not in dfs:
return {}
else:
df = dfs['Missing']
df['DOB'] = pd.to_datetime(df['DOB'], format='%d/%m/%Y', errors='coerce')
df['MIS_START'] = pd.to_datetime(df['MIS_START'], format='%d/%m/%Y', errors='coerce')
error_mask = df['MIS_START'].notna() & (df['MIS_START'] <= df['DOB'])
return {'Missing': df.index[error_mask].to_list()}
return error, _validate
def validate_630():
error = ErrorDefinition(
code='630',
description='Information on previous permanence option should be returned.',
affected_fields=['RNE'],
)
def _validate(dfs):
if 'PrevPerm' not in dfs or 'Episodes' not in dfs:
return {}
else:
epi = dfs['Episodes']
pre = dfs['PrevPerm']
epi['DECOM'] = pd.to_datetime(epi['DECOM'], format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
epi = epi.reset_index()
# Form the episode dataframe which has an 'RNE' of 'S' in this financial year
epi_has_rne_of_S_in_year = epi[(epi['RNE'] == 'S') & (epi['DECOM'] >= collection_start)]
# Merge to see
# 1) which CHILD ids are missing from the PrevPerm file
# 2) which CHILD are in the prevPerm file, but don't have the LA_PERM/DATE_PERM field completed where they should be
# 3) which CHILD are in the PrevPerm file, but don't have the PREV_PERM field completed.
merged_epi_preperm = epi_has_rne_of_S_in_year.merge(pre, on='CHILD', how='left', indicator=True)
error_not_in_preperm = merged_epi_preperm['_merge'] == 'left_only'
error_wrong_values_in_preperm = (merged_epi_preperm['PREV_PERM'] != 'Z1') & (
merged_epi_preperm[['LA_PERM', 'DATE_PERM']].isna().any(axis=1))
error_null_prev_perm = (merged_epi_preperm['_merge'] == 'both') & (merged_epi_preperm['PREV_PERM'].isna())
error_mask = error_not_in_preperm | error_wrong_values_in_preperm | error_null_prev_perm
error_list = merged_epi_preperm[error_mask]['index'].to_list()
error_list = list(set(error_list))
error_list.sort()
return {'Episodes': error_list}
return error, _validate
def validate_501():
error = ErrorDefinition(
code='501',
description='A new episode has started before the end date of the previous episode.',
affected_fields=['DECOM', 'DEC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
epi = dfs['Episodes']
epi = epi.reset_index()
epi['DECOM'] = pd.to_datetime(epi['DECOM'], format='%d/%m/%Y', errors='coerce')
epi['DEC'] = pd.to_datetime(epi['DEC'], format='%d/%m/%Y', errors='coerce')
epi = epi.sort_values(['CHILD', 'DECOM'])
epi_lead = epi.shift(1)
epi_lead = epi_lead.reset_index()
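# join each episode to the row that precedes it in CHILD/DECOM order via the shifted frame, then
# flag episodes that start before the same child's previous episode has ended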
m_epi = epi.merge(epi_lead, left_on='index', right_on='level_0', suffixes=('', '_prev'))
error_cohort = m_epi[(m_epi['CHILD'] == m_epi['CHILD_prev']) & (m_epi['DECOM'] < m_epi['DEC_prev'])]
error_list = error_cohort['index'].to_list()
error_list.sort()
return {'Episodes': error_list}
return error, _validate
def validate_502():
error = ErrorDefinition(
code='502',
description='Last year’s record ended with an open episode. The date on which that episode started does not match the start date of the first episode on this year’s record.',
affected_fields=['DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Episodes_last' not in dfs:
return {}
else:
epi = dfs['Episodes']
epi_last = dfs['Episodes_last']
epi = epi.reset_index()
epi['DECOM'] = pd.to_datetime(epi['DECOM'], format='%d/%m/%Y', errors='coerce')
epi_last['DECOM'] = pd.to_datetime(epi_last['DECOM'], format='%d/%m/%Y', errors='coerce')
epi_last_no_dec = epi_last[epi_last['DEC'].isna()]
epi_min_decoms_index = epi[['CHILD', 'DECOM']].groupby(['CHILD'])['DECOM'].idxmin()
epi_min_decom_df = epi.loc[epi_min_decoms_index, :]
merged_episodes = epi_min_decom_df.merge(epi_last_no_dec, on='CHILD', how='inner')
error_cohort = merged_episodes[merged_episodes['DECOM_x'] != merged_episodes['DECOM_y']]
error_list = error_cohort['index'].to_list()
error_list = list(set(error_list))
error_list.sort()
return {'Episodes': error_list}
return error, _validate
def validate_153():
error = ErrorDefinition(
code='153',
description="All data items relating to a child's activity or accommodation after leaving care must be coded or left blank.",
affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM'],
)
def _validate(dfs):
if 'OC3' not in dfs:
return {}
oc3 = dfs['OC3']
oc3_not_na = (
oc3['IN_TOUCH'].notna() &
oc3['ACTIV'].notna() &
oc3['ACCOM'].notna()
)
oc3_all_na = (
oc3['IN_TOUCH'].isna() &
oc3['ACTIV'].isna() &
oc3['ACCOM'].isna()
)
validation_error = ~oc3_not_na & ~oc3_all_na
validation_error_locations = oc3.index[validation_error]
return {'OC3': validation_error_locations.to_list()}
return error, _validate
def validate_166():
error = ErrorDefinition(
code='166',
description="Date of review is invalid or blank.",
affected_fields=['REVIEW'],
)
def _validate(dfs):
if 'Reviews' not in dfs:
return {}
else:
review = dfs['Reviews']
error_mask = pd.to_datetime(review['REVIEW'], format='%d/%m/%Y', errors='coerce').isna()
validation_error_locations = review.index[error_mask]
return {'Reviews': validation_error_locations.to_list()}
return error, _validate
def validate_174():
error = ErrorDefinition(
code='174',
description="Mother's child date of birth is recorded but gender shows that the child is a male.",
affected_fields=['SEX', 'MC_DOB'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
else:
header = dfs['Header']
child_is_male = header['SEX'].astype(str) == '1'
mc_dob_recorded = header['MC_DOB'].notna()
error_mask = child_is_male & mc_dob_recorded
validation_error_locations = header.index[error_mask]
return {'Header': validation_error_locations.to_list()}
return error, _validate
def validate_180():
error = ErrorDefinition(
code='180',
description="Data entry for the strengths and difficulties questionnaire (SDQ) score is invalid.",
affected_fields=['SDQ_SCORE'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
oc2['SDQ_SCORE'] = pd.to_numeric(oc2['SDQ_SCORE'], errors='coerce')
error_mask = oc2['SDQ_SCORE'].notna() & ~oc2['SDQ_SCORE'].isin(range(41))
validation_error_locations = oc2.index[error_mask]
return {'OC2': validation_error_locations.to_list()}
return error, _validate
def validate_181():
error = ErrorDefinition(
code='181',
description="Data items relating to children looked after continuously for 12 months should be completed with a 0 or 1.",
affected_fields=['CONVICTED', 'HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT',
'SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
code_list = ['0', '1']
fields_of_interest = ['CONVICTED', 'HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT',
'SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED']
error_mask = (
oc2[fields_of_interest].notna()
& ~oc2[fields_of_interest].astype(str).isin(['0', '1'])
).any(axis=1)
validation_error_locations = oc2.index[error_mask]
return {'OC2': validation_error_locations.tolist()}
return error, _validate
def validate_192():
error = ErrorDefinition(
code='192',
description="Child has been identified as having a substance misuse problem but the additional item on whether an intervention was received has been left blank.",
affected_fields=['SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
misuse = oc2['SUBSTANCE_MISUSE'].astype(str) == '1'
intervention_blank = oc2['INTERVENTION_RECEIVED'].isna()
error_mask = misuse & intervention_blank
validation_error_locations = oc2.index[error_mask]
return {'OC2': validation_error_locations.to_list()}
return error, _validate
def validate_193():
error = ErrorDefinition(
code='193',
description="Child not identified as having a substance misuse problem but at least one of the two additional items on whether an intervention were offered and received have been completed.",
affected_fields=['SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
no_substance_misuse = oc2['SUBSTANCE_MISUSE'].isna() | (oc2['SUBSTANCE_MISUSE'].astype(str) == '0')
intervention_not_blank = oc2['INTERVENTION_RECEIVED'].notna() | oc2['INTERVENTION_OFFERED'].notna()
error_mask = no_substance_misuse & intervention_not_blank
validation_error_locations = oc2.index[error_mask]
return {'OC2': validation_error_locations.tolist()}
return error, _validate
def validate_197a():
error = ErrorDefinition(
code='197a',
description="Reason for no Strengths and Difficulties (SDQ) score is not required if Strengths and Difficulties Questionnaire score is filled in.",
affected_fields=['SDQ_SCORE', 'SDQ_REASON'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
sdq_filled_in = oc2['SDQ_SCORE'].notna()
reason_filled_in = oc2['SDQ_REASON'].notna()
error_mask = sdq_filled_in & reason_filled_in
validation_error_locations = oc2.index[error_mask]
return {'OC2': validation_error_locations.tolist()}
return error, _validate
def validate_567():
error = ErrorDefinition(
code='567',
description='The date that the missing episode or episode that the child was away from placement without authorisation ended is before the date that it started.',
affected_fields=['MIS_START', 'MIS_END'],
)
def _validate(dfs):
if 'Missing' not in dfs:
return {}
else:
mis = dfs['Missing']
mis['MIS_START'] = pd.to_datetime(mis['MIS_START'], format='%d/%m/%Y', errors='coerce')
mis['MIS_END'] = pd.to_datetime(mis['MIS_END'], format='%d/%m/%Y', errors='coerce')
mis_error = mis[mis['MIS_START'] > mis['MIS_END']]
return {'Missing': mis_error.index.to_list()}
return error, _validate
def validate_304():
error = ErrorDefinition(
code='304',
description='Date unaccompanied asylum-seeking child (UASC) status ceased must be on or before the 18th birthday of a child.',
affected_fields=['DUC'],
)
def _validate(dfs):
if 'UASC' not in dfs:
return {}
else:
uasc = dfs['UASC']
uasc['DOB'] = pd.to_datetime(uasc['DOB'], format='%d/%m/%Y', errors='coerce')
uasc['DUC'] = pd.to_datetime(uasc['DUC'], format='%d/%m/%Y', errors='coerce')
mask = uasc['DUC'].notna() & (uasc['DUC'] > uasc['DOB'] + pd.offsets.DateOffset(years=18))
return {'UASC': uasc.index[mask].to_list()}
return error, _validate
def validate_333():
error = ErrorDefinition(
code='333',
description='Date should be placed for adoption must be on or prior to the date of matching child with adopter(s).',
affected_fields=['DATE_INT'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
adt = dfs['AD1']
adt['DATE_MATCH'] = pd.to_datetime(adt['DATE_MATCH'], format='%d/%m/%Y', errors='coerce')
adt['DATE_INT'] = pd.to_datetime(adt['DATE_INT'], format='%d/%m/%Y', errors='coerce')
# If <DATE_MATCH> provided, then <DATE_INT> must also be provided and be <= <DATE_MATCH>
mask1 = adt['DATE_MATCH'].notna() & adt['DATE_INT'].isna()
mask2 = adt['DATE_MATCH'].notna() & adt['DATE_INT'].notna() & (adt['DATE_INT'] > adt['DATE_MATCH'])
mask = mask1 | mask2
return {'AD1': adt.index[mask].to_list()}
return error, _validate
def validate_1011():
error = ErrorDefinition(
code='1011',
description='This child is recorded as having his/her care transferred to another local authority for the final episode and therefore should not have the care leaver information completed.',
affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM'],
)
def _validate(dfs):
if 'OC3' not in dfs or 'Episodes' not in dfs:
return {}
else:
epi = dfs['Episodes']
oc3 = dfs['OC3']
epi['DECOM'] = pd.to_datetime(epi['DECOM'], format='%d/%m/%Y', errors='coerce')
# If final <REC> = 'E3' then <IN_TOUCH>; <ACTIV> and <ACCOM> should not be provided
epi.sort_values(['CHILD', 'DECOM'], inplace=True)
grouped_decom_by_child = epi.groupby(['CHILD'])['DECOM'].idxmax(skipna=True)
max_decom_only = epi.loc[epi.index.isin(grouped_decom_by_child), :]
E3_is_last = max_decom_only[max_decom_only['REC'] == 'E3']
oc3.reset_index(inplace=True)
cohort_to_check = oc3.merge(E3_is_last, on='CHILD', how='inner')
error_mask = cohort_to_check[['IN_TOUCH', 'ACTIV', 'ACCOM']].notna().any(axis=1)
error_list = cohort_to_check['index'][error_mask].to_list()
error_list = list(set(error_list))
error_list.sort()
return {'OC3': error_list}
return error, _validate
def validate_574():
error = ErrorDefinition(
code='574',
description='A new missing/away from placement without authorisation period cannot start when the previous missing/away from placement without authorisation period is still open. Missing/away from placement without authorisation periods should also not overlap.',
affected_fields=['MIS_START', 'MIS_END'],
)
def _validate(dfs):
if 'Missing' not in dfs:
return {}
else:
mis = dfs['Missing']
mis['MIS_START'] = pd.to_datetime(mis['MIS_START'], format='%d/%m/%Y', errors='coerce')
mis['MIS_END'] = pd.to_datetime(mis['MIS_END'], format='%d/%m/%Y', errors='coerce')
mis.sort_values(['CHILD', 'MIS_START'], inplace=True)
mis.reset_index(inplace=True)
mis.reset_index(inplace=True) # Twice on purpose
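            # The first reset_index keeps the original row labels in an 'index' column; the second adds a
            # positional 'level_0' column, so each row can be joined to the row before it (suffixed '_PREV')
            # via the shifted LAG_INDEX below.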
mis['LAG_INDEX'] = mis['level_0'].shift(-1)
lag_mis = mis.merge(mis, how='inner', left_on='level_0', right_on='LAG_INDEX', suffixes=['', '_PREV'])
# We're only interested in cases where there is more than one row for a child.
lag_mis = lag_mis[lag_mis['CHILD'] == lag_mis['CHILD_PREV']]
# A previous MIS_END date is null
mask1 = lag_mis['MIS_END_PREV'].isna()
# MIS_START is before previous MIS_END (overlapping dates)
mask2 = lag_mis['MIS_START'] < lag_mis['MIS_END_PREV']
mask = mask1 | mask2
error_list = lag_mis['index'][mask].to_list()
error_list.sort()
return {'Missing': error_list}
return error, _validate
def validate_564():
error = ErrorDefinition(
code='564',
description='Child was missing or away from placement without authorisation and the date started is blank.',
affected_fields=['MISSING', 'MIS_START'],
)
def _validate(dfs):
if 'Missing' not in dfs:
return {}
else:
mis = dfs['Missing']
error_mask = mis['MISSING'].isin(['M', 'A', 'm', 'a']) & mis['MIS_START'].isna()
return {'Missing': mis.index[error_mask].to_list()}
return error, _validate
def validate_566():
error = ErrorDefinition(
code='566',
        description="The date that the child's episode of being missing or away from placement without authorisation ended has been completed but whether the child was missing or away without authorisation has not been completed.",
affected_fields=['MISSING', 'MIS_END'],
)
def _validate(dfs):
if 'Missing' not in dfs:
return {}
else:
mis = dfs['Missing']
error_mask = mis['MISSING'].isna() & mis['MIS_END'].notna()
return {'Missing': mis.index[error_mask].to_list()}
return error, _validate
def validate_436():
error = ErrorDefinition(
code='436',
description='Reason for new episode is that both child’s placement and legal status have changed, but this is not reflected in the episode data.',
affected_fields=['RNE', 'LS', 'PLACE', 'PL_POST', 'URN', 'PLACE_PROVIDER'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
epi = dfs['Episodes']
epi['DECOM'] = pd.to_datetime(epi['DECOM'], format='%d/%m/%Y', errors='coerce')
epi.sort_values(['CHILD', 'DECOM'], inplace=True)
epi.reset_index(inplace=True)
epi.reset_index(inplace=True)
epi['LAG_INDEX'] = epi['level_0'].shift(-1)
epi.fillna(value={"LS": '*', "PLACE": '*', "PL_POST": '*', "URN": '*', "PLACE_PROVIDER": '*'}, inplace=True)
epi_merge = epi.merge(epi, how='inner', left_on='level_0', right_on='LAG_INDEX', suffixes=['', '_PRE'])
epi_multi_row = epi_merge[epi_merge['CHILD'] == epi_merge['CHILD_PRE']]
epi_has_B_U = epi_multi_row[epi_multi_row['RNE'].isin(['U', 'B'])]
mask_ls = epi_has_B_U['LS'] == epi_has_B_U['LS_PRE']
mask1 = epi_has_B_U['PLACE'] == epi_has_B_U['PLACE_PRE']
mask2 = epi_has_B_U['PL_POST'] == epi_has_B_U['PL_POST_PRE']
mask3 = epi_has_B_U['URN'] == epi_has_B_U['URN_PRE']
mask4 = epi_has_B_U['PLACE_PROVIDER'] == epi_has_B_U['PLACE_PROVIDER_PRE']
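            # An RNE of 'B' or 'U' asserts that both legal status and placement changed, so flag rows where
            # the legal status is unchanged or every placement detail matches the previous episode.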
error_mask = mask_ls | (mask1 & mask2 & mask3 & mask4)
error_list = epi_has_B_U[error_mask]['index'].to_list()
error_list.sort()
return {'Episodes': error_list}
return error, _validate
def validate_570():
error = ErrorDefinition(
code='570',
description='The date that the child started to be missing or away from placement without authorisation is after the end of the collection year.',
affected_fields=['MIS_START'],
)
def _validate(dfs):
if 'Missing' not in dfs:
return {}
else:
mis = dfs['Missing']
collection_end = | pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce') | pandas.to_datetime |
# --------------
import pandas as pd
from collections import Counter
# Load dataset
data = | pd.read_csv(path) | pandas.read_csv |
#!/usr/bin/python
# coding=utf-8
# Extract text keywords with the TextRank method
import pandas as pd
import jieba.analyse
"""
TextRank weighting:
1. Tokenise the text whose keywords are to be extracted, remove stop words, and filter by part of speech
2. Build a graph from word co-occurrence within a fixed window (default 5, adjustable via the span parameter)
3. Compute PageRank over the graph nodes; note the graph is undirected and weighted
"""
# Process titles and abstracts and extract keywords
def getKeywords_textrank(data,topK):
idList,titleList,abstractList = data['id'],data['title'],data['abstract']
ids, titles, keys = [], [], []
for index in range(len(idList)):
        text = '%s。%s' % (titleList[index], abstractList[index])  # concatenate title and abstract
        jieba.analyse.set_stop_words("data/stopWord.txt")  # load the custom stop-word list
        print("\"", titleList[index], "\"", " Top", topK, "Keywords - TextRank:")
        keywords = jieba.analyse.textrank(text, topK=topK, allowPOS=('n','nz','v','vd','vn','l','a','d'))  # TextRank keyword extraction with POS filtering
        keywords = [x for x in keywords if len(x) > 1]  # drop single-character words
word_split = " ".join(keywords)
print(word_split)
# keys.append(word_split.encode("utf-8"))
keys.append(word_split)
ids.append(idList[index])
titles.append(titleList[index])
result = pd.DataFrame({"id": ids, "title": titles, "key": keys}, columns=['id', 'title', 'key'])
return result
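# Illustrative usage of getKeywords_textrank; the CSV path and the 'id'/'title'/'abstract'
# column layout are assumptions made for this example, not part of the module:
# data = pd.read_csv('data/sample_data.csv')
# result = getKeywords_textrank(data, topK=10)
# result.to_csv('result/keys_TextRank.csv', index=False)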
def getKeywords_tr(path_out, data, path_stop, topK, pos):
labels, contents = data['label'], data['content']
ids, titles, keys = [], [], []
for index, text in enumerate(contents):
        jieba.analyse.set_stop_words(path_stop)  # load the custom stop-word list
        print('index=', index, ' computing TextRank for ', path_out)
        keywords = jieba.analyse.textrank(text, topK=topK, allowPOS=pos)  # TextRank keyword extraction with POS filtering
if len(keywords) < 2:
print('textrank keywords <2:', keywords, ' index=', index, ' :', path_out, ' text:', text)
word_split = ",".join(keywords)
keys.append(word_split)
result = | pd.DataFrame({'label': labels, "content": contents, "textrank_keys": keys}, columns=['label','content', 'textrank_keys']) | pandas.DataFrame |
# pylint: disable-msg=E1101,W0612
from datetime import datetime, time, timedelta, date
import sys
import os
import operator
from distutils.version import LooseVersion
import nose
import numpy as np
randn = np.random.randn
from pandas import (Index, Series, TimeSeries, DataFrame,
isnull, date_range, Timestamp, Period, DatetimeIndex,
Int64Index, to_datetime, bdate_range, Float64Index)
import pandas.core.datetools as datetools
import pandas.tseries.offsets as offsets
import pandas.tseries.tools as tools
import pandas.tseries.frequencies as fmod
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tslib import NaT, iNaT
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.index as _index
from pandas.compat import range, long, StringIO, lrange, lmap, zip, product
import pandas.core.datetools as dt
from numpy.random import rand
from numpy.testing import assert_array_equal
from pandas.util.testing import assert_frame_equal
import pandas.compat as compat
import pandas.core.common as com
from pandas import concat
from pandas import _np_version_under1p7
from numpy.testing.decorators import slow
def _skip_if_no_pytz():
try:
import pytz
except ImportError:
raise nose.SkipTest("pytz not installed")
def _skip_if_has_locale():
import locale
lang, _ = locale.getlocale()
if lang is not None:
raise nose.SkipTest("Specific locale is set {0}".format(lang))
class TestTimeSeriesDuplicates(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
dates = [datetime(2000, 1, 2), datetime(2000, 1, 2),
datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 3), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 4),
datetime(2000, 1, 4), datetime(2000, 1, 5)]
self.dups = Series(np.random.randn(len(dates)), index=dates)
def test_constructor(self):
tm.assert_isinstance(self.dups, TimeSeries)
tm.assert_isinstance(self.dups.index, DatetimeIndex)
def test_is_unique_monotonic(self):
self.assertFalse(self.dups.index.is_unique)
def test_index_unique(self):
uniques = self.dups.index.unique()
expected = DatetimeIndex([datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 5)])
self.assertEqual(uniques.dtype, 'M8[ns]') # sanity
self.assertTrue(uniques.equals(expected))
self.assertEqual(self.dups.index.nunique(), 4)
# #2563
self.assertTrue(isinstance(uniques, DatetimeIndex))
dups_local = self.dups.index.tz_localize('US/Eastern')
dups_local.name = 'foo'
result = dups_local.unique()
expected = DatetimeIndex(expected, tz='US/Eastern')
self.assertTrue(result.tz is not None)
self.assertEqual(result.name, 'foo')
self.assertTrue(result.equals(expected))
# NaT
arr = [ 1370745748 + t for t in range(20) ] + [iNaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
arr = [ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
def test_index_dupes_contains(self):
d = datetime(2011, 12, 5, 20, 30)
ix = DatetimeIndex([d, d])
self.assertTrue(d in ix)
def test_duplicate_dates_indexing(self):
ts = self.dups
uniques = ts.index.unique()
for date in uniques:
result = ts[date]
mask = ts.index == date
total = (ts.index == date).sum()
expected = ts[mask]
if total > 1:
assert_series_equal(result, expected)
else:
assert_almost_equal(result, expected[0])
cp = ts.copy()
cp[date] = 0
expected = Series(np.where(mask, 0, ts), index=ts.index)
assert_series_equal(cp, expected)
self.assertRaises(KeyError, ts.__getitem__, datetime(2000, 1, 6))
# new index
ts[datetime(2000,1,6)] = 0
self.assertEqual(ts[datetime(2000,1,6)], 0)
def test_range_slice(self):
idx = DatetimeIndex(['1/1/2000', '1/2/2000', '1/2/2000', '1/3/2000',
'1/4/2000'])
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts['1/2/2000':]
expected = ts[1:]
assert_series_equal(result, expected)
result = ts['1/2/2000':'1/3/2000']
expected = ts[1:4]
assert_series_equal(result, expected)
def test_groupby_average_dup_values(self):
result = self.dups.groupby(level=0).mean()
expected = self.dups.groupby(self.dups.index).mean()
assert_series_equal(result, expected)
def test_indexing_over_size_cutoff(self):
import datetime
# #1821
old_cutoff = _index._SIZE_CUTOFF
try:
_index._SIZE_CUTOFF = 1000
# create large list of non periodic datetime
dates = []
sec = datetime.timedelta(seconds=1)
half_sec = datetime.timedelta(microseconds=500000)
d = datetime.datetime(2011, 12, 5, 20, 30)
n = 1100
for i in range(n):
dates.append(d)
dates.append(d + sec)
dates.append(d + sec + half_sec)
dates.append(d + sec + sec + half_sec)
d += 3 * sec
# duplicate some values in the list
duplicate_positions = np.random.randint(0, len(dates) - 1, 20)
for p in duplicate_positions:
dates[p + 1] = dates[p]
df = DataFrame(np.random.randn(len(dates), 4),
index=dates,
columns=list('ABCD'))
pos = n * 3
timestamp = df.index[pos]
self.assertIn(timestamp, df.index)
# it works!
df.ix[timestamp]
self.assertTrue(len(df.ix[[timestamp]]) > 0)
finally:
_index._SIZE_CUTOFF = old_cutoff
def test_indexing_unordered(self):
# GH 2437
rng = date_range(start='2011-01-01', end='2011-01-15')
ts = Series(randn(len(rng)), index=rng)
ts2 = concat([ts[0:4],ts[-4:],ts[4:-4]])
for t in ts.index:
s = str(t)
expected = ts[t]
result = ts2[t]
self.assertTrue(expected == result)
# GH 3448 (ranges)
def compare(slobj):
result = ts2[slobj].copy()
result = result.sort_index()
expected = ts[slobj]
assert_series_equal(result,expected)
compare(slice('2011-01-01','2011-01-15'))
compare(slice('2010-12-30','2011-01-15'))
compare(slice('2011-01-01','2011-01-16'))
# partial ranges
compare(slice('2011-01-01','2011-01-6'))
compare(slice('2011-01-06','2011-01-8'))
compare(slice('2011-01-06','2011-01-12'))
# single values
result = ts2['2011'].sort_index()
expected = ts['2011']
assert_series_equal(result,expected)
# diff freq
rng = date_range(datetime(2005, 1, 1), periods=20, freq='M')
ts = Series(np.arange(len(rng)), index=rng)
ts = ts.take(np.random.permutation(20))
result = ts['2005']
for t in result.index:
self.assertTrue(t.year == 2005)
def test_indexing(self):
idx = date_range("2001-1-1", periods=20, freq='M')
ts = Series(np.random.rand(len(idx)),index=idx)
# getting
# GH 3070, make sure semantics work on Series/Frame
expected = ts['2001']
df = DataFrame(dict(A = ts))
result = df['2001']['A']
assert_series_equal(expected,result)
# setting
ts['2001'] = 1
expected = ts['2001']
df.loc['2001','A'] = 1
result = df['2001']['A']
assert_series_equal(expected,result)
# GH3546 (not including times on the last day)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:00', freq='H')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:59', freq='S')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = [ Timestamp('2013-05-31 00:00'), Timestamp(datetime(2013,5,31,23,59,59,999999))]
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013']
assert_series_equal(expected,ts)
# GH 3925, indexing with a seconds resolution string / datetime object
df = DataFrame(randn(5,5),columns=['open','high','low','close','volume'],index=date_range('2012-01-02 18:01:00',periods=5,tz='US/Central',freq='s'))
expected = df.loc[[df.index[2]]]
result = df['2012-01-02 18:01:02']
assert_frame_equal(result,expected)
# this is a single date, so will raise
self.assertRaises(KeyError, df.__getitem__, df.index[2],)
def test_recreate_from_data(self):
if _np_version_under1p7:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N', 'C']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, periods=1)
idx = DatetimeIndex(org, freq=f)
self.assertTrue(idx.equals(org))
        # unable to create tz-aware 'A' and 'C' freq
if _np_version_under1p7:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, tz='US/Pacific', periods=1)
idx = DatetimeIndex(org, freq=f, tz='US/Pacific')
self.assertTrue(idx.equals(org))
def assert_range_equal(left, right):
assert(left.equals(right))
assert(left.freq == right.freq)
assert(left.tz == right.tz)
class TestTimeSeries(tm.TestCase):
_multiprocess_can_split_ = True
def test_is_(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
self.assertTrue(dti.is_(dti))
self.assertTrue(dti.is_(dti.view()))
self.assertFalse(dti.is_(dti.copy()))
def test_dti_slicing(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
dti2 = dti[[1, 3, 5]]
v1 = dti2[0]
v2 = dti2[1]
v3 = dti2[2]
self.assertEqual(v1, Timestamp('2/28/2005'))
self.assertEqual(v2, Timestamp('4/30/2005'))
self.assertEqual(v3, Timestamp('6/30/2005'))
# don't carry freq through irregular slicing
self.assertIsNone(dti2.freq)
def test_pass_datetimeindex_to_index(self):
# Bugs in #1396
rng = date_range('1/1/2000', '3/1/2000')
idx = Index(rng, dtype=object)
expected = Index(rng.to_pydatetime(), dtype=object)
self.assert_numpy_array_equal(idx.values, expected.values)
def test_contiguous_boolean_preserve_freq(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
mask = np.zeros(len(rng), dtype=bool)
mask[10:20] = True
masked = rng[mask]
expected = rng[10:20]
self.assertIsNotNone(expected.freq)
assert_range_equal(masked, expected)
mask[22] = True
masked = rng[mask]
self.assertIsNone(masked.freq)
def test_getitem_median_slice_bug(self):
index = date_range('20090415', '20090519', freq='2B')
s = Series(np.random.randn(13), index=index)
indexer = [slice(6, 7, None)]
result = s[indexer]
expected = s[indexer[0]]
assert_series_equal(result, expected)
def test_series_box_timestamp(self):
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng)
tm.assert_isinstance(s[5], Timestamp)
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng, index=rng)
tm.assert_isinstance(s[5], Timestamp)
tm.assert_isinstance(s.iget_value(5), Timestamp)
def test_date_range_ambiguous_arguments(self):
# #2538
start = datetime(2011, 1, 1, 5, 3, 40)
end = datetime(2011, 1, 1, 8, 9, 40)
self.assertRaises(ValueError, date_range, start, end,
freq='s', periods=10)
def test_timestamp_to_datetime(self):
_skip_if_no_pytz()
rng = date_range('20090415', '20090519',
tz='US/Eastern')
stamp = rng[0]
dtval = stamp.to_pydatetime()
self.assertEqual(stamp, dtval)
self.assertEqual(stamp.tzinfo, dtval.tzinfo)
def test_index_convert_to_datetime_array(self):
_skip_if_no_pytz()
def _check_rng(rng):
converted = rng.to_pydatetime()
tm.assert_isinstance(converted, np.ndarray)
for x, stamp in zip(converted, rng):
tm.assert_isinstance(x, datetime)
self.assertEqual(x, stamp.to_pydatetime())
self.assertEqual(x.tzinfo, stamp.tzinfo)
rng = date_range('20090415', '20090519')
rng_eastern = date_range('20090415', '20090519', tz='US/Eastern')
rng_utc = date_range('20090415', '20090519', tz='utc')
_check_rng(rng)
_check_rng(rng_eastern)
_check_rng(rng_utc)
def test_ctor_str_intraday(self):
rng = DatetimeIndex(['1-1-2000 00:00:01'])
self.assertEqual(rng[0].second, 1)
def test_series_ctor_plus_datetimeindex(self):
rng = date_range('20090415', '20090519', freq='B')
data = dict((k, 1) for k in rng)
result = Series(data, index=rng)
self.assertIs(result.index, rng)
def test_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index)
result = result.fillna(method='bfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index, method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index, method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_setitem_timestamp(self):
# 2155
columns = DatetimeIndex(start='1/1/2012', end='2/1/2012',
freq=datetools.bday)
index = lrange(10)
data = DataFrame(columns=columns, index=index)
t = datetime(2012, 11, 1)
ts = Timestamp(t)
data[ts] = np.nan # works
def test_sparse_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
ss = s[:2].reindex(index).to_sparse()
result = ss.fillna(method='pad', limit=5)
expected = ss.fillna(method='pad', limit=5)
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
ss = s[-2:].reindex(index).to_sparse()
result = ss.fillna(method='backfill', limit=5)
expected = ss.fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
s = s.to_sparse()
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index, method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index, method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_sparse_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_pad_require_monotonicity(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
rng2 = rng[::2][::-1]
self.assertRaises(ValueError, rng2.get_indexer, rng,
method='pad')
def test_frame_ctor_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
df = DataFrame({'A': np.random.randn(len(rng)), 'B': dates})
self.assertTrue(np.issubdtype(df['B'].dtype, np.dtype('M8[ns]')))
def test_frame_add_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
df = DataFrame(index=np.arange(len(rng)))
df['A'] = rng
self.assertTrue(np.issubdtype(df['A'].dtype, np.dtype('M8[ns]')))
def test_frame_datetime64_pre1900_repr(self):
df = DataFrame({'year': date_range('1/1/1700', periods=50,
freq='A-DEC')})
# it works!
repr(df)
def test_frame_add_datetime64_col_other_units(self):
n = 100
units = ['h', 'm', 's', 'ms', 'D', 'M', 'Y']
ns_dtype = np.dtype('M8[ns]')
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df[unit] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertEqual(df[unit].dtype, ns_dtype)
self.assertTrue((df[unit].values == ex_vals).all())
# Test insertion into existing datetime64 column
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df['dates'] = np.arange(n, dtype=np.int64).view(ns_dtype)
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
tmp = df.copy()
tmp['dates'] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertTrue((tmp['dates'].values == ex_vals).all())
def test_to_datetime_unit(self):
epoch = 1370745748
s = Series([ epoch + t for t in range(20) ])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = concat([Series([ epoch + t for t in range(20) ]).astype(float),Series([np.nan])],ignore_index=True)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
def test_series_ctor_datetime64(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
series = Series(dates)
self.assertTrue(np.issubdtype(series.dtype, np.dtype('M8[ns]')))
def test_index_cast_datetime64_other_units(self):
arr = np.arange(0, 100, 10, dtype=np.int64).view('M8[D]')
idx = Index(arr)
self.assertTrue((idx.values == tslib.cast_to_nanoseconds(arr)).all())
def test_index_astype_datetime64(self):
idx = Index([datetime(2012, 1, 1)], dtype=object)
if not _np_version_under1p7:
raise nose.SkipTest("test only valid in numpy < 1.7")
casted = idx.astype(np.dtype('M8[D]'))
expected = DatetimeIndex(idx.values)
tm.assert_isinstance(casted, DatetimeIndex)
self.assertTrue(casted.equals(expected))
def test_reindex_series_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
series = Series(rng)
result = series.reindex(lrange(15))
self.assertTrue(np.issubdtype(result.dtype, np.dtype('M8[ns]')))
mask = result.isnull()
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_reindex_frame_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
df = DataFrame({'A': np.random.randn(len(rng)), 'B': rng})
result = df.reindex(lrange(15))
self.assertTrue(np.issubdtype(result['B'].dtype, np.dtype('M8[ns]')))
mask = com.isnull(result)['B']
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_series_repr_nat(self):
series = Series([0, 1000, 2000, iNaT], dtype='M8[ns]')
result = repr(series)
expected = ('0 1970-01-01 00:00:00\n'
'1 1970-01-01 00:00:00.000001\n'
'2 1970-01-01 00:00:00.000002\n'
'3 NaT\n'
'dtype: datetime64[ns]')
self.assertEqual(result, expected)
def test_fillna_nat(self):
series = Series([0, 1, 2, iNaT], dtype='M8[ns]')
filled = series.fillna(method='pad')
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='pad')
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
series = Series([iNaT, 0, 1, 2], dtype='M8[ns]')
filled = series.fillna(method='bfill')
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='bfill')
filled2 = df.fillna(value=series[1])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
def test_string_na_nat_conversion(self):
# GH #999, #858
from pandas.compat import parse_date
strings = np.array(['1/1/2000', '1/2/2000', np.nan,
'1/4/2000, 12:34:56'], dtype=object)
expected = np.empty(4, dtype='M8[ns]')
for i, val in enumerate(strings):
if com.isnull(val):
expected[i] = iNaT
else:
expected[i] = parse_date(val)
result = tslib.array_to_datetime(strings)
assert_almost_equal(result, expected)
result2 = to_datetime(strings)
tm.assert_isinstance(result2, DatetimeIndex)
assert_almost_equal(result, result2)
malformed = np.array(['1/100/2000', np.nan], dtype=object)
result = to_datetime(malformed)
assert_almost_equal(result, malformed)
self.assertRaises(ValueError, to_datetime, malformed,
errors='raise')
idx = ['a', 'b', 'c', 'd', 'e']
series = Series(['1/1/2000', np.nan, '1/3/2000', np.nan,
'1/5/2000'], index=idx, name='foo')
dseries = Series([to_datetime('1/1/2000'), np.nan,
to_datetime('1/3/2000'), np.nan,
to_datetime('1/5/2000')], index=idx, name='foo')
result = to_datetime(series)
dresult = to_datetime(dseries)
expected = Series(np.empty(5, dtype='M8[ns]'), index=idx)
for i in range(5):
x = series[i]
if isnull(x):
expected[i] = iNaT
else:
expected[i] = to_datetime(x)
assert_series_equal(result, expected)
self.assertEqual(result.name, 'foo')
assert_series_equal(dresult, expected)
self.assertEqual(dresult.name, 'foo')
def test_to_datetime_iso8601(self):
result = to_datetime(["2012-01-01 00:00:00"])
exp = Timestamp("2012-01-01 00:00:00")
self.assertEqual(result[0], exp)
result = to_datetime(['20121001']) # bad iso 8601
exp = Timestamp('2012-10-01')
self.assertEqual(result[0], exp)
def test_to_datetime_default(self):
rs = to_datetime('2001')
xp = datetime(2001, 1, 1)
        self.assertEqual(rs, xp)
#### dayfirst is essentially broken
#### to_datetime('01-13-2012', dayfirst=True)
#### self.assertRaises(ValueError, to_datetime('01-13-2012', dayfirst=True))
def test_to_datetime_on_datetime64_series(self):
# #2699
s = Series(date_range('1/1/2000', periods=10))
result = to_datetime(s)
self.assertEqual(result[0], s[0])
def test_to_datetime_with_apply(self):
# this is only locale tested with US/None locales
_skip_if_has_locale()
# GH 5195
# with a format and coerce a single item to_datetime fails
td = Series(['May 04', 'Jun 02', 'Dec 11'], index=[1,2,3])
expected = pd.to_datetime(td, format='%b %y')
result = td.apply(pd.to_datetime, format='%b %y')
assert_series_equal(result, expected)
td = pd.Series(['May 04', 'Jun 02', ''], index=[1,2,3])
self.assertRaises(ValueError, lambda : pd.to_datetime(td,format='%b %y'))
self.assertRaises(ValueError, lambda : td.apply(pd.to_datetime, format='%b %y'))
expected = pd.to_datetime(td, format='%b %y', coerce=True)
result = td.apply(lambda x: pd.to_datetime(x, format='%b %y', coerce=True))
assert_series_equal(result, expected)
def test_nat_vector_field_access(self):
idx = DatetimeIndex(['1/1/2000', None, None, '1/4/2000'])
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(idx, field)
expected = [getattr(x, field) if x is not NaT else -1
for x in idx]
self.assert_numpy_array_equal(result, expected)
def test_nat_scalar_field_access(self):
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(NaT, field)
self.assertEqual(result, -1)
self.assertEqual(NaT.weekday(), -1)
def test_to_datetime_types(self):
# empty string
result = to_datetime('')
self.assertIs(result, NaT)
result = to_datetime(['', ''])
self.assertTrue(isnull(result).all())
# ints
result = Timestamp(0)
expected = to_datetime(0)
self.assertEqual(result, expected)
# GH 3888 (strings)
expected = to_datetime(['2012'])[0]
result = to_datetime('2012')
self.assertEqual(result, expected)
### array = ['2012','20120101','20120101 12:01:01']
array = ['20120101','20120101 12:01:01']
expected = list(to_datetime(array))
result = lmap(Timestamp,array)
tm.assert_almost_equal(result,expected)
### currently fails ###
### result = Timestamp('2012')
### expected = to_datetime('2012')
### self.assertEqual(result, expected)
def test_to_datetime_unprocessable_input(self):
# GH 4928
self.assert_numpy_array_equal(
to_datetime([1, '1']),
np.array([1, '1'], dtype='O')
)
self.assertRaises(TypeError, to_datetime, [1, '1'], errors='raise')
def test_to_datetime_other_datetime64_units(self):
# 5/25/2012
scalar = np.int64(1337904000000000).view('M8[us]')
as_obj = scalar.astype('O')
index = DatetimeIndex([scalar])
self.assertEqual(index[0], scalar.astype('O'))
value = Timestamp(scalar)
self.assertEqual(value, as_obj)
def test_to_datetime_list_of_integers(self):
rng = date_range('1/1/2000', periods=20)
rng = DatetimeIndex(rng.values)
ints = list(rng.asi8)
result = DatetimeIndex(ints)
self.assertTrue(rng.equals(result))
def test_to_datetime_dt64s(self):
in_bound_dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
for dt in in_bound_dts:
self.assertEqual(
pd.to_datetime(dt),
Timestamp(dt)
)
oob_dts = [
np.datetime64('1000-01-01'),
np.datetime64('5000-01-02'),
]
for dt in oob_dts:
self.assertRaises(ValueError, pd.to_datetime, dt, errors='raise')
self.assertRaises(ValueError, tslib.Timestamp, dt)
self.assertIs(pd.to_datetime(dt, coerce=True), NaT)
def test_to_datetime_array_of_dt64s(self):
dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
# Assuming all datetimes are in bounds, to_datetime() returns
# an array that is equal to Timestamp() parsing
self.assert_numpy_array_equal(
pd.to_datetime(dts, box=False),
np.array([Timestamp(x).asm8 for x in dts])
)
# A list of datetimes where the last one is out of bounds
dts_with_oob = dts + [np.datetime64('9999-01-01')]
self.assertRaises(
ValueError,
pd.to_datetime,
dts_with_oob,
coerce=False,
errors='raise'
)
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=True),
np.array(
[
Timestamp(dts_with_oob[0]).asm8,
Timestamp(dts_with_oob[1]).asm8,
iNaT,
],
dtype='M8'
)
)
# With coerce=False and errors='ignore', out of bounds datetime64s
# are converted to their .item(), which depending on the version of
# numpy is either a python datetime.datetime or datetime.date
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=False),
np.array(
[dt.item() for dt in dts_with_oob],
dtype='O'
)
)
def test_index_to_datetime(self):
idx = Index(['1/1/2000', '1/2/2000', '1/3/2000'])
result = idx.to_datetime()
expected = DatetimeIndex(datetools.to_datetime(idx.values))
self.assertTrue(result.equals(expected))
today = datetime.today()
idx = Index([today], dtype=object)
result = idx.to_datetime()
expected = DatetimeIndex([today])
self.assertTrue(result.equals(expected))
def test_to_datetime_freq(self):
xp = bdate_range('2000-1-1', periods=10, tz='UTC')
rs = xp.to_datetime()
self.assertEqual(xp.freq, rs.freq)
self.assertEqual(xp.tzinfo, rs.tzinfo)
def test_range_misspecified(self):
# GH #1095
self.assertRaises(ValueError, date_range, '1/1/2000')
self.assertRaises(ValueError, date_range, end='1/1/2000')
self.assertRaises(ValueError, date_range, periods=10)
self.assertRaises(ValueError, date_range, '1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, end='1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, periods=10, freq='H')
def test_reasonable_keyerror(self):
# GH #1062
index = DatetimeIndex(['1/3/2000'])
try:
index.get_loc('1/1/2000')
except KeyError as e:
self.assertIn('2000', str(e))
def test_reindex_with_datetimes(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
result = ts.reindex(list(ts.index[5:10]))
expected = ts[5:10]
tm.assert_series_equal(result, expected)
result = ts[list(ts.index[5:10])]
tm.assert_series_equal(result, expected)
def test_promote_datetime_date(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
ts_slice = ts[5:]
ts2 = ts_slice.copy()
ts2.index = [x.date() for x in ts2.index]
result = ts + ts2
result2 = ts2 + ts
expected = ts + ts[5:]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# test asfreq
result = ts2.asfreq('4H', method='ffill')
expected = ts[5:].asfreq('4H', method='ffill')
assert_series_equal(result, expected)
result = rng.get_indexer(ts2.index)
expected = rng.get_indexer(ts_slice.index)
self.assert_numpy_array_equal(result, expected)
def test_asfreq_normalize(self):
rng = date_range('1/1/2000 09:30', periods=20)
norm = date_range('1/1/2000', periods=20)
vals = np.random.randn(20)
ts = Series(vals, index=rng)
result = ts.asfreq('D', normalize=True)
norm = date_range('1/1/2000', periods=20)
expected = Series(vals, index=norm)
assert_series_equal(result, expected)
vals = np.random.randn(20, 3)
ts = DataFrame(vals, index=rng)
result = ts.asfreq('D', normalize=True)
expected = DataFrame(vals, index=norm)
assert_frame_equal(result, expected)
def test_date_range_gen_error(self):
rng = date_range('1/1/2000 00:00', '1/1/2000 00:18', freq='5min')
self.assertEqual(len(rng), 4)
def test_first_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.first('10d')
self.assertEqual(len(result), 20)
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.first('10d')
self.assertEqual(len(result), 10)
result = ts.first('3M')
expected = ts[:'3/31/2000']
assert_series_equal(result, expected)
result = ts.first('21D')
expected = ts[:21]
assert_series_equal(result, expected)
result = ts[:0].first('3M')
assert_series_equal(result, ts[:0])
def test_last_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.last('10d')
self.assertEqual(len(result), 20)
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.last('10d')
self.assertEqual(len(result), 10)
result = ts.last('21D')
expected = ts['12/12/2009':]
assert_series_equal(result, expected)
result = ts.last('21D')
expected = ts[-21:]
assert_series_equal(result, expected)
result = ts[:0].last('3M')
assert_series_equal(result, ts[:0])
def test_add_offset(self):
rng = date_range('1/1/2000', '2/1/2000')
result = rng + offsets.Hour(2)
expected = date_range('1/1/2000 02:00', '2/1/2000 02:00')
self.assertTrue(result.equals(expected))
def test_format_pre_1900_dates(self):
rng = date_range('1/1/1850', '1/1/1950', freq='A-DEC')
rng.format()
ts = Series(1, index=rng)
repr(ts)
def test_repeat(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
def test_at_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
rs = ts.at_time(rng[1])
self.assertTrue((rs.index.hour == rng[1].hour).all())
self.assertTrue((rs.index.minute == rng[1].minute).all())
self.assertTrue((rs.index.second == rng[1].second).all())
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts[time(9, 30)]
result_df = df.ix[time(9, 30)]
expected = ts[(rng.hour == 9) & (rng.minute == 30)]
exp_df = df[(rng.hour == 9) & (rng.minute == 30)]
# expected.index = date_range('1/1/2000', '1/4/2000')
assert_series_equal(result, expected)
tm.assert_frame_equal(result_df, exp_df)
chunk = df.ix['1/4/2000':]
result = chunk.ix[time(9, 30)]
expected = result_df[-1:]
tm.assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.at_time(time(0, 0))
assert_series_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = Series(np.random.randn(len(rng)), rng)
rs = ts.at_time('16:00')
self.assertEqual(len(rs), 0)
def test_at_time_frame(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
rs = ts.at_time(rng[1])
self.assertTrue((rs.index.hour == rng[1].hour).all())
self.assertTrue((rs.index.minute == rng[1].minute).all())
self.assertTrue((rs.index.second == rng[1].second).all())
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_frame_equal(result, expected)
result = ts.ix[time(9, 30)]
expected = ts.ix[(rng.hour == 9) & (rng.minute == 30)]
assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts.at_time(time(0, 0))
assert_frame_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = DataFrame(np.random.randn(len(rng), 2), rng)
rs = ts.at_time('16:00')
self.assertEqual(len(rs), 0)
def test_between_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
stime = time(0, 0)
etime = time(1, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue(t >= stime)
else:
self.assertTrue(t > stime)
if inc_end:
self.assertTrue(t <= etime)
else:
self.assertTrue(t < etime)
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
assert_series_equal(result, expected)
# across midnight
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
stime = time(22, 0)
etime = time(9, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue((t >= stime) or (t <= etime))
else:
self.assertTrue((t > stime) or (t <= etime))
if inc_end:
self.assertTrue((t <= etime) or (t >= stime))
else:
self.assertTrue((t < etime) or (t >= stime))
def test_between_time_frame(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(0, 0)
etime = time(1, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue(t >= stime)
else:
self.assertTrue(t > stime)
if inc_end:
self.assertTrue(t <= etime)
else:
self.assertTrue(t < etime)
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
assert_frame_equal(result, expected)
# across midnight
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(22, 0)
etime = time(9, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue((t >= stime) or (t <= etime))
else:
self.assertTrue((t > stime) or (t <= etime))
if inc_end:
self.assertTrue((t <= etime) or (t >= stime))
else:
self.assertTrue((t < etime) or (t >= stime))
def test_dti_constructor_preserve_dti_freq(self):
rng = date_range('1/1/2000', '1/2/2000', freq='5min')
rng2 = DatetimeIndex(rng)
self.assertEqual(rng.freq, rng2.freq)
def test_normalize(self):
rng = date_range('1/1/2000 9:30', periods=10, freq='D')
result = rng.normalize()
expected = date_range('1/1/2000', periods=10, freq='D')
self.assertTrue(result.equals(expected))
rng_ns = pd.DatetimeIndex(np.array([1380585623454345752, 1380585612343234312]).astype("datetime64[ns]"))
rng_ns_normalized = rng_ns.normalize()
expected = pd.DatetimeIndex(np.array([1380585600000000000, 1380585600000000000]).astype("datetime64[ns]"))
self.assertTrue(rng_ns_normalized.equals(expected))
self.assertTrue(result.is_normalized)
self.assertFalse(rng.is_normalized)
def test_to_period(self):
from pandas.tseries.period import period_range
ts = _simple_ts('1/1/2000', '1/1/2001')
pts = ts.to_period()
exp = ts.copy()
exp.index = period_range('1/1/2000', '1/1/2001')
assert_series_equal(pts, exp)
pts = ts.to_period('M')
self.assertTrue(pts.index.equals(exp.index.asfreq('M')))
def create_dt64_based_index(self):
data = [Timestamp('2007-01-01 10:11:12.123456Z'),
Timestamp('2007-01-01 10:11:13.789123Z')]
index = DatetimeIndex(data)
return index
def test_to_period_millisecond(self):
index = self.create_dt64_based_index()
period = index.to_period(freq='L')
self.assertEqual(2, len(period))
self.assertEqual(period[0], Period('2007-01-01 10:11:12.123Z', 'L'))
self.assertEqual(period[1], Period('2007-01-01 10:11:13.789Z', 'L'))
def test_to_period_microsecond(self):
index = self.create_dt64_based_index()
period = index.to_period(freq='U')
self.assertEqual(2, len(period))
self.assertEqual(period[0], Period('2007-01-01 10:11:12.123456Z', 'U'))
self.assertEqual(period[1], Period('2007-01-01 10:11:13.789123Z', 'U'))
def test_to_period_tz(self):
_skip_if_no_pytz()
from dateutil.tz import tzlocal
from pytz import utc as UTC
xp = date_range('1/1/2000', '4/1/2000').to_period()
ts = date_range('1/1/2000', '4/1/2000', tz='US/Eastern')
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
ts = date_range('1/1/2000', '4/1/2000', tz=UTC)
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
ts = date_range('1/1/2000', '4/1/2000', tz=tzlocal())
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
def test_frame_to_period(self):
K = 5
from pandas.tseries.period import period_range
dr = date_range('1/1/2000', '1/1/2001')
pr = period_range('1/1/2000', '1/1/2001')
df = DataFrame(randn(len(dr), K), index=dr)
df['mix'] = 'a'
pts = df.to_period()
exp = df.copy()
exp.index = pr
assert_frame_equal(pts, exp)
pts = df.to_period('M')
self.assertTrue(pts.index.equals(exp.index.asfreq('M')))
df = df.T
pts = df.to_period(axis=1)
exp = df.copy()
exp.columns = pr
assert_frame_equal(pts, exp)
pts = df.to_period('M', axis=1)
self.assertTrue(pts.columns.equals(exp.columns.asfreq('M')))
self.assertRaises(ValueError, df.to_period, axis=2)
def test_timestamp_fields(self):
# extra fields from DatetimeIndex like quarter and week
idx = tm.makeDateIndex(100)
fields = ['dayofweek', 'dayofyear', 'week', 'weekofyear', 'quarter', 'is_month_start', 'is_month_end', 'is_quarter_start', 'is_quarter_end', 'is_year_start', 'is_year_end']
for f in fields:
expected = getattr(idx, f)[-1]
result = getattr(Timestamp(idx[-1]), f)
self.assertEqual(result, expected)
self.assertEqual(idx.freq, Timestamp(idx[-1], idx.freq).freq)
self.assertEqual(idx.freqstr, Timestamp(idx[-1], idx.freq).freqstr)
def test_woy_boundary(self):
# make sure weeks at year boundaries are correct
d = datetime(2013,12,31)
result = Timestamp(d).week
expected = 1 # ISO standard
self.assertEqual(result, expected)
d = datetime(2008,12,28)
result = Timestamp(d).week
expected = 52 # ISO standard
self.assertEqual(result, expected)
d = datetime(2009,12,31)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
d = datetime(2010,1,1)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
d = datetime(2010,1,3)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
result = np.array([Timestamp(datetime(*args)).week for args in
[(2000,1,1),(2000,1,2),(2005,1,1),(2005,1,2)]])
self.assertTrue((result == [52, 52, 53, 53]).all())
def test_timestamp_date_out_of_range(self):
self.assertRaises(ValueError, Timestamp, '1676-01-01')
self.assertRaises(ValueError, Timestamp, '2263-01-01')
# 1475
self.assertRaises(ValueError, DatetimeIndex, ['1400-01-01'])
self.assertRaises(ValueError, DatetimeIndex, [datetime(1400, 1, 1)])
def test_timestamp_repr(self):
# pre-1900
stamp = Timestamp('1850-01-01', tz='US/Eastern')
repr(stamp)
iso8601 = '1850-01-01 01:23:45.012345'
stamp = Timestamp(iso8601, tz='US/Eastern')
result = repr(stamp)
self.assertIn(iso8601, result)
def test_timestamp_from_ordinal(self):
# GH 3042
dt = datetime(2011, 4, 16, 0, 0)
ts = Timestamp.fromordinal(dt.toordinal())
self.assertEqual(ts.to_pydatetime(), dt)
# with a tzinfo
stamp = Timestamp('2011-4-16', tz='US/Eastern')
dt_tz = stamp.to_pydatetime()
ts = Timestamp.fromordinal(dt_tz.toordinal(),tz='US/Eastern')
self.assertEqual(ts.to_pydatetime(), dt_tz)
def test_datetimeindex_integers_shift(self):
rng = date_range('1/1/2000', periods=20)
result = rng + 5
expected = rng.shift(5)
self.assertTrue(result.equals(expected))
result = rng - 5
expected = rng.shift(-5)
self.assertTrue(result.equals(expected))
def test_astype_object(self):
# NumPy 1.6.1 weak ns support
rng = date_range('1/1/2000', periods=20)
casted = rng.astype('O')
exp_values = list(rng)
self.assert_numpy_array_equal(casted, exp_values)
def test_catch_infinite_loop(self):
offset = datetools.DateOffset(minute=5)
# blow up, don't loop forever
self.assertRaises(Exception, date_range, datetime(2011, 11, 11),
datetime(2011, 11, 12), freq=offset)
def test_append_concat(self):
rng = date_range('5/8/2012 1:45', periods=10, freq='5T')
ts = Series(np.random.randn(len(rng)), rng)
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
result = ts.append(ts)
result_df = df.append(df)
ex_index = DatetimeIndex(np.tile(rng.values, 2))
self.assertTrue(result.index.equals(ex_index))
self.assertTrue(result_df.index.equals(ex_index))
appended = rng.append(rng)
self.assertTrue(appended.equals(ex_index))
appended = rng.append([rng, rng])
ex_index = DatetimeIndex(np.tile(rng.values, 3))
self.assertTrue(appended.equals(ex_index))
# different index names
rng1 = rng.copy()
rng2 = rng.copy()
rng1.name = 'foo'
rng2.name = 'bar'
self.assertEqual(rng1.append(rng1).name, 'foo')
self.assertIsNone(rng1.append(rng2).name)
def test_append_concat_tz(self):
#GH 2938
_skip_if_no_pytz()
rng = date_range('5/8/2012 1:45', periods=10, freq='5T',
tz='US/Eastern')
rng2 = date_range('5/8/2012 2:35', periods=10, freq='5T',
tz='US/Eastern')
rng3 = date_range('5/8/2012 1:45', periods=20, freq='5T',
tz='US/Eastern')
ts = Series(np.random.randn(len(rng)), rng)
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
ts2 = Series(np.random.randn(len(rng2)), rng2)
df2 = DataFrame(np.random.randn(len(rng2), 4), index=rng2)
result = ts.append(ts2)
result_df = df.append(df2)
self.assertTrue(result.index.equals(rng3))
self.assertTrue(result_df.index.equals(rng3))
appended = rng.append(rng2)
self.assertTrue(appended.equals(rng3))
def test_set_dataframe_column_ns_dtype(self):
x = DataFrame([datetime.now(), datetime.now()])
self.assertEqual(x[0].dtype, np.dtype('M8[ns]'))
def test_groupby_count_dateparseerror(self):
dr = date_range(start='1/1/2012', freq='5min', periods=10)
# BAD Example, datetimes first
s = Series(np.arange(10), index=[dr, lrange(10)])
grouped = s.groupby(lambda x: x[1] % 2 == 0)
result = grouped.count()
s = Series(np.arange(10), index=[lrange(10), dr])
grouped = s.groupby(lambda x: x[0] % 2 == 0)
expected = grouped.count()
assert_series_equal(result, expected)
def test_datetimeindex_repr_short(self):
dr = date_range(start='1/1/2012', periods=1)
repr(dr)
dr = date_range(start='1/1/2012', periods=2)
repr(dr)
dr = date_range(start='1/1/2012', periods=3)
repr(dr)
def test_constructor_int64_nocopy(self):
# #1624
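        # With the default copy=False the index wraps the passed int64 array
        # without copying, so mutating ``arr`` is visible through ``asi8``;
        # copy=True below takes an independent snapshot.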
arr = np.arange(1000, dtype=np.int64)
index = DatetimeIndex(arr)
arr[50:100] = -1
self.assertTrue((index.asi8[50:100] == -1).all())
arr = np.arange(1000, dtype=np.int64)
index = DatetimeIndex(arr, copy=True)
arr[50:100] = -1
self.assertTrue((index.asi8[50:100] != -1).all())
def test_series_interpolate_method_values(self):
# #1646
ts = _simple_ts('1/1/2000', '1/20/2000')
ts[::2] = np.nan
result = ts.interpolate(method='values')
exp = ts.interpolate()
assert_series_equal(result, exp)
def test_frame_datetime64_handling_groupby(self):
# it works!
df = DataFrame([(3, np.datetime64('2012-07-03')),
(3, np.datetime64('2012-07-04'))],
columns=['a', 'date'])
result = df.groupby('a').first()
self.assertEqual(result['date'][3], Timestamp('2012-07-03'))
def test_series_interpolate_intraday(self):
# #1698
index = pd.date_range('1/1/2012', periods=4, freq='12D')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(days=1)).order()
exp = ts.reindex(new_index).interpolate(method='time')
index = pd.date_range('1/1/2012', periods=4, freq='12H')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(hours=1)).order()
result = ts.reindex(new_index).interpolate(method='time')
self.assert_numpy_array_equal(result.values, exp.values)
def test_frame_dict_constructor_datetime64_1680(self):
dr = date_range('1/1/2012', periods=10)
s = Series(dr, index=dr)
# it works!
DataFrame({'a': 'foo', 'b': s}, index=dr)
DataFrame({'a': 'foo', 'b': s.values}, index=dr)
def test_frame_datetime64_mixed_index_ctor_1681(self):
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
ts = Series(dr)
# it works!
d = DataFrame({'A': 'foo', 'B': ts}, index=dr)
self.assertTrue(d['B'].isnull().all())
def test_frame_timeseries_to_records(self):
index = date_range('1/1/2000', periods=10)
df = DataFrame(np.random.randn(10, 3), index=index,
columns=['a', 'b', 'c'])
result = df.to_records()
        self.assertEqual(result['index'].dtype, np.dtype('M8[ns]'))
result = df.to_records(index=False)
def test_frame_datetime64_duplicated(self):
dates = date_range('2010-07-01', end='2010-08-05')
tst = DataFrame({'symbol': 'AAA', 'date': dates})
result = tst.duplicated(['date', 'symbol'])
self.assertTrue((-result).all())
tst = DataFrame({'date': dates})
result = tst.duplicated()
self.assertTrue((-result).all())
def test_timestamp_compare_with_early_datetime(self):
# e.g. datetime.min
stamp = Timestamp('2012-01-01')
self.assertFalse(stamp == datetime.min)
self.assertFalse(stamp == datetime(1600, 1, 1))
self.assertFalse(stamp == datetime(2700, 1, 1))
self.assertNotEqual(stamp, datetime.min)
self.assertNotEqual(stamp, datetime(1600, 1, 1))
self.assertNotEqual(stamp, datetime(2700, 1, 1))
self.assertTrue(stamp > datetime(1600, 1, 1))
self.assertTrue(stamp >= datetime(1600, 1, 1))
self.assertTrue(stamp < datetime(2700, 1, 1))
self.assertTrue(stamp <= datetime(2700, 1, 1))
def test_to_html_timestamp(self):
rng = date_range('2000-01-01', periods=10)
df = DataFrame(np.random.randn(10, 4), index=rng)
result = df.to_html()
self.assertIn('2000-01-01', result)
def test_to_csv_numpy_16_bug(self):
frame = DataFrame({'a': date_range('1/1/2000', periods=10)})
buf = StringIO()
frame.to_csv(buf)
result = buf.getvalue()
self.assertIn('2000-01-01', result)
def test_series_map_box_timestamps(self):
# #2689, #2627
s = Series(date_range('1/1/2000', periods=10))
def f(x):
return (x.hour, x.day, x.month)
# it works!
s.map(f)
s.apply(f)
DataFrame(s).applymap(f)
def test_concat_datetime_datetime64_frame(self):
# #2624
rows = []
rows.append([datetime(2010, 1, 1), 1])
rows.append([datetime(2010, 1, 2), 'hi'])
df2_obj = DataFrame.from_records(rows, columns=['date', 'test'])
ind = date_range(start="2000/1/1", freq="D", periods=10)
df1 = DataFrame({'date': ind, 'test':lrange(10)})
# it works!
pd.concat([df1, df2_obj])
def test_period_resample(self):
# GH3609
s = Series(range(100),index=date_range('20130101', freq='s', periods=100), dtype='float')
s[10:30] = np.nan
expected = Series([34.5, 79.5], index=[Period('2013-01-01 00:00', 'T'), Period('2013-01-01 00:01', 'T')])
result = s.to_period().resample('T', kind='period')
assert_series_equal(result, expected)
result2 = s.resample('T', kind='period')
assert_series_equal(result2, expected)
def test_period_resample_with_local_timezone(self):
# GH5430
_skip_if_no_pytz()
import pytz
local_timezone = pytz.timezone('America/Los_Angeles')
start = datetime(year=2013, month=11, day=1, hour=0, minute=0, tzinfo=pytz.utc)
# 1 day later
end = datetime(year=2013, month=11, day=2, hour=0, minute=0, tzinfo=pytz.utc)
index = pd.date_range(start, end, freq='H')
series = pd.Series(1, index=index)
series = series.tz_convert(local_timezone)
result = series.resample('D', kind='period')
# Create the expected series
expected_index = (pd.period_range(start=start, end=end, freq='D') - 1) # Index is moved back a day with the timezone conversion from UTC to Pacific
expected = pd.Series(1, index=expected_index)
assert_series_equal(result, expected)
def test_pickle(self):
#GH4606
from pandas.compat import cPickle
import pickle
for pick in [pickle, cPickle]:
p = pick.loads(pick.dumps(NaT))
self.assertTrue(p is NaT)
idx = pd.to_datetime(['2013-01-01', NaT, '2014-01-06'])
idx_p = pick.loads(pick.dumps(idx))
self.assertTrue(idx_p[0] == idx[0])
self.assertTrue(idx_p[1] is NaT)
self.assertTrue(idx_p[2] == idx[2])
def _simple_ts(start, end, freq='D'):
rng = date_range(start, end, freq=freq)
return Series(np.random.randn(len(rng)), index=rng)
class TestDatetimeIndex(tm.TestCase):
_multiprocess_can_split_ = True
def test_hash_error(self):
index = date_range('20010101', periods=10)
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(index).__name__):
hash(index)
def test_stringified_slice_with_tz(self):
#GH2658
import datetime
start=datetime.datetime.now()
idx=DatetimeIndex(start=start,freq="1d",periods=10)
df=DataFrame(lrange(10),index=idx)
df["2013-01-14 23:44:34.437768-05:00":] # no exception here
def test_append_join_nondatetimeindex(self):
rng = date_range('1/1/2000', periods=10)
idx = Index(['a', 'b', 'c', 'd'])
result = rng.append(idx)
tm.assert_isinstance(result[0], Timestamp)
# it works
rng.join(idx, how='outer')
def test_astype(self):
rng = date_range('1/1/2000', periods=10)
result = rng.astype('i8')
self.assert_numpy_array_equal(result, rng.asi8)
def test_to_period_nofreq(self):
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-04'])
self.assertRaises(ValueError, idx.to_period)
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'],
freq='infer')
idx.to_period()
def test_000constructor_resolution(self):
# 2252
t1 = Timestamp((1352934390 * 1000000000) + 1000000 + 1000 + 1)
idx = DatetimeIndex([t1])
self.assertEqual(idx.nanosecond[0], t1.nanosecond)
def test_constructor_coverage(self):
rng = date_range('1/1/2000', periods=10.5)
exp = date_range('1/1/2000', periods=10)
self.assertTrue(rng.equals(exp))
self.assertRaises(ValueError, DatetimeIndex, start='1/1/2000',
periods='foo', freq='D')
self.assertRaises(ValueError, DatetimeIndex, start='1/1/2000',
end='1/10/2000')
self.assertRaises(ValueError, DatetimeIndex, '1/1/2000')
# generator expression
gen = (datetime(2000, 1, 1) + timedelta(i) for i in range(10))
result = DatetimeIndex(gen)
expected = DatetimeIndex([datetime(2000, 1, 1) + timedelta(i)
for i in range(10)])
self.assertTrue(result.equals(expected))
# NumPy string array
strings = np.array(['2000-01-01', '2000-01-02', '2000-01-03'])
result = DatetimeIndex(strings)
expected = DatetimeIndex(strings.astype('O'))
self.assertTrue(result.equals(expected))
from_ints = DatetimeIndex(expected.asi8)
self.assertTrue(from_ints.equals(expected))
# non-conforming
self.assertRaises(ValueError, DatetimeIndex,
['2000-01-01', '2000-01-02', '2000-01-04'],
freq='D')
self.assertRaises(ValueError, DatetimeIndex,
start='2011-01-01', freq='b')
self.assertRaises(ValueError, DatetimeIndex,
end='2011-01-01', freq='B')
self.assertRaises(ValueError, DatetimeIndex, periods=10, freq='D')
def test_constructor_name(self):
idx = DatetimeIndex(start='2000-01-01', periods=1, freq='A',
name='TEST')
self.assertEqual(idx.name, 'TEST')
def test_comparisons_coverage(self):
rng = date_range('1/1/2000', periods=10)
# raise TypeError for now
self.assertRaises(TypeError, rng.__lt__, rng[3].value)
result = rng == list(rng)
exp = rng == rng
self.assert_numpy_array_equal(result, exp)
def test_map(self):
rng = date_range('1/1/2000', periods=10)
f = lambda x: x.strftime('%Y%m%d')
result = rng.map(f)
exp = [f(x) for x in rng]
self.assert_numpy_array_equal(result, exp)
def test_add_union(self):
rng = date_range('1/1/2000', periods=5)
rng2 = date_range('1/6/2000', periods=5)
result = rng + rng2
expected = rng.union(rng2)
self.assertTrue(result.equals(expected))
def test_misc_coverage(self):
rng = date_range('1/1/2000', periods=5)
result = rng.groupby(rng.day)
tm.assert_isinstance(list(result.values())[0][0], Timestamp)
idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02'])
self.assertTrue(idx.equals(list(idx)))
non_datetime = Index(list('abc'))
self.assertFalse(idx.equals(list(non_datetime)))
def test_union_coverage(self):
idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02'])
ordered = DatetimeIndex(idx.order(), freq='infer')
result = ordered.union(idx)
self.assertTrue(result.equals(ordered))
result = ordered[:0].union(ordered)
self.assertTrue(result.equals(ordered))
self.assertEqual(result.freq, ordered.freq)
def test_union_bug_1730(self):
rng_a = date_range('1/1/2012', periods=4, freq='3H')
rng_b = date_range('1/1/2012', periods=4, freq='4H')
result = rng_a.union(rng_b)
exp = DatetimeIndex(sorted(set(list(rng_a)) | set(list(rng_b))))
self.assertTrue(result.equals(exp))
def test_union_bug_1745(self):
left = DatetimeIndex(['2012-05-11 15:19:49.695000'])
right = DatetimeIndex(['2012-05-29 13:04:21.322000',
'2012-05-11 15:27:24.873000',
'2012-05-11 15:31:05.350000'])
result = left.union(right)
exp = DatetimeIndex(sorted(set(list(left)) | set(list(right))))
self.assertTrue(result.equals(exp))
def test_union_bug_4564(self):
from pandas import DateOffset
left = date_range("2013-01-01", "2013-02-01")
right = left + DateOffset(minutes=15)
result = left.union(right)
exp = DatetimeIndex(sorted(set(list(left)) | set(list(right))))
self.assertTrue(result.equals(exp))
def test_intersection_bug_1708(self):
from pandas import DateOffset
index_1 = date_range('1/1/2012', periods=4, freq='12H')
index_2 = index_1 + DateOffset(hours=1)
result = index_1 & index_2
self.assertEqual(len(result), 0)
# def test_add_timedelta64(self):
# rng = date_range('1/1/2000', periods=5)
# delta = rng.values[3] - rng.values[1]
# result = rng + delta
# expected = rng + timedelta(2)
# self.assertTrue(result.equals(expected))
def test_get_duplicates(self):
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-02',
'2000-01-03', '2000-01-03', '2000-01-04'])
result = idx.get_duplicates()
ex = DatetimeIndex(['2000-01-02', '2000-01-03'])
self.assertTrue(result.equals(ex))
def test_argmin_argmax(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
self.assertEqual(idx.argmin(), 1)
self.assertEqual(idx.argmax(), 0)
def test_order(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
ordered = idx.order()
self.assertTrue(ordered.is_monotonic)
ordered = idx.order(ascending=False)
self.assertTrue(ordered[::-1].is_monotonic)
ordered, dexer = idx.order(return_indexer=True)
self.assertTrue(ordered.is_monotonic)
self.assert_numpy_array_equal(dexer, [1, 2, 0])
ordered, dexer = idx.order(return_indexer=True, ascending=False)
self.assertTrue(ordered[::-1].is_monotonic)
self.assert_numpy_array_equal(dexer, [0, 2, 1])
def test_insert(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
result = idx.insert(2, datetime(2000, 1, 5))
exp = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-05',
'2000-01-02'])
self.assertTrue(result.equals(exp))
# insertion of non-datetime should coerce to object index
result = idx.insert(1, 'inserted')
expected = Index([datetime(2000, 1, 4), 'inserted', datetime(2000, 1, 1),
datetime(2000, 1, 2)])
self.assertNotIsInstance(result, DatetimeIndex)
tm.assert_index_equal(result, expected)
idx = date_range('1/1/2000', periods=3, freq='M')
result = idx.insert(3, datetime(2000, 4, 30))
self.assertEqual(result.freqstr, 'M')
def test_map_bug_1677(self):
index = DatetimeIndex(['2012-04-25 09:30:00.393000'])
f = index.asof
result = index.map(f)
expected = np.array([f(index[0])])
self.assert_numpy_array_equal(result, expected)
def test_groupby_function_tuple_1677(self):
df = DataFrame(np.random.rand(100),
index=date_range("1/1/2000", periods=100))
monthly_group = df.groupby(lambda x: (x.year, x.month))
result = monthly_group.mean()
tm.assert_isinstance(result.index[0], tuple)
def test_append_numpy_bug_1681(self):
# another datetime64 bug
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
a = DataFrame()
c = DataFrame({'A': 'foo', 'B': dr}, index=dr)
result = a.append(c)
self.assertTrue((result['B'] == dr).all())
def test_isin(self):
index = tm.makeDateIndex(4)
result = index.isin(index)
self.assertTrue(result.all())
result = index.isin(list(index))
self.assertTrue(result.all())
assert_almost_equal(index.isin([index[2], 5]),
[False, False, True, False])
def test_union(self):
i1 = Int64Index(np.arange(0, 20, 2))
i2 = Int64Index(np.arange(10, 30, 2))
result = i1.union(i2)
expected = Int64Index(np.arange(0, 30, 2))
self.assert_numpy_array_equal(result, expected)
def test_union_with_DatetimeIndex(self):
i1 = Int64Index(np.arange(0, 20, 2))
i2 = DatetimeIndex(start='2012-01-03 00:00:00', periods=10, freq='D')
i1.union(i2) # Works
i2.union(i1) # Fails with "AttributeError: can't set attribute"
def test_time(self):
rng = pd.date_range('1/1/2000', freq='12min', periods=10)
result = pd.Index(rng).time
expected = [t.time() for t in rng]
self.assertTrue((result == expected).all())
def test_date(self):
rng = pd.date_range('1/1/2000', freq='12H', periods=10)
result = pd.Index(rng).date
expected = [t.date() for t in rng]
self.assertTrue((result == expected).all())
def test_does_not_convert_mixed_integer(self):
df = tm.makeCustomDataframe(10, 10, data_gen_f=lambda *args, **kwargs:
randn(), r_idx_type='i', c_idx_type='dt')
cols = df.columns.join(df.index, how='outer')
joined = cols.join(df.columns)
self.assertEqual(cols.dtype, np.dtype('O'))
self.assertEqual(cols.dtype, joined.dtype)
assert_array_equal(cols.values, joined.values)
def test_slice_keeps_name(self):
# GH4226
st = pd.Timestamp('2013-07-01 00:00:00', tz='America/Los_Angeles')
et = pd.Timestamp('2013-07-02 00:00:00', tz='America/Los_Angeles')
dr = pd.date_range(st, et, freq='H', name='timebucket')
self.assertEqual(dr[1:].name, dr.name)
def test_join_self(self):
index = date_range('1/1/2000', periods=10)
kinds = 'outer', 'inner', 'left', 'right'
for kind in kinds:
joined = index.join(index, how=kind)
self.assertIs(index, joined)
def assert_index_parameters(self, index):
assert index.freq == '40960N'
assert index.inferred_freq == '40960N'
def test_ns_index(self):
if _np_version_under1p7:
raise nose.SkipTest
nsamples = 400
ns = int(1e9 / 24414)
dtstart = np.datetime64('2012-09-20T00:00:00')
dt = dtstart + np.arange(nsamples) * np.timedelta64(ns, 'ns')
freq = ns * pd.datetools.Nano()
index = pd.DatetimeIndex(dt, freq=freq, name='time')
self.assert_index_parameters(index)
new_index = pd.DatetimeIndex(start=index[0], end=index[-1], freq=index.freq)
self.assert_index_parameters(new_index)
def test_join_with_period_index(self):
df = tm.makeCustomDataframe(10, 10, data_gen_f=lambda *args:
np.random.randint(2), c_idx_type='p',
r_idx_type='dt')
s = df.iloc[:5, 0]
joins = 'left', 'right', 'inner', 'outer'
for join in joins:
with tm.assertRaisesRegexp(ValueError, 'can only call with other '
'PeriodIndex-ed objects'):
df.columns.join(s.index, how=join)
def test_factorize(self):
idx1 = DatetimeIndex(['2014-01', '2014-01', '2014-02',
'2014-02', '2014-03', '2014-03'])
exp_arr = np.array([0, 0, 1, 1, 2, 2])
exp_idx = DatetimeIndex(['2014-01', '2014-02', '2014-03'])
arr, idx = idx1.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
arr, idx = idx1.factorize(sort=True)
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
# tz must be preserved
idx1 = idx1.tz_localize('Asia/Tokyo')
exp_idx = exp_idx.tz_localize('Asia/Tokyo')
arr, idx = idx1.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
idx2 = pd.DatetimeIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'])
exp_arr = np.array([2, 2, 1, 0, 2, 0])
exp_idx = DatetimeIndex(['2014-01', '2014-02', '2014-03'])
arr, idx = idx2.factorize(sort=True)
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
exp_arr = np.array([0, 0, 1, 2, 0, 2])
exp_idx = DatetimeIndex(['2014-03', '2014-02', '2014-01'])
arr, idx = idx2.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
# freq must be preserved
idx3 = date_range('2000-01', periods=4, freq='M', tz='Asia/Tokyo')
exp_arr = np.array([0, 1, 2, 3])
arr, idx = idx3.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(idx3))
class TestDatetime64(tm.TestCase):
"""
Also test support for datetime64[ns] in Series / DataFrame
"""
def setUp(self):
dti = DatetimeIndex(start=datetime(2005, 1, 1),
end=datetime(2005, 1, 10), freq='Min')
self.series = Series(rand(len(dti)), dti)
def test_datetimeindex_accessors(self):
dti = DatetimeIndex(
freq='D', start=datetime(1998, 1, 1), periods=365)
self.assertEqual(dti.year[0], 1998)
self.assertEqual(dti.month[0], 1)
self.assertEqual(dti.day[0], 1)
self.assertEqual(dti.hour[0], 0)
self.assertEqual(dti.minute[0], 0)
self.assertEqual(dti.second[0], 0)
self.assertEqual(dti.microsecond[0], 0)
self.assertEqual(dti.dayofweek[0], 3)
self.assertEqual(dti.dayofyear[0], 1)
self.assertEqual(dti.dayofyear[120], 121)
self.assertEqual(dti.weekofyear[0], 1)
self.assertEqual(dti.weekofyear[120], 18)
self.assertEqual(dti.quarter[0], 1)
self.assertEqual(dti.quarter[120], 2)
self.assertEqual(dti.is_month_start[0], True)
self.assertEqual(dti.is_month_start[1], False)
self.assertEqual(dti.is_month_start[31], True)
self.assertEqual(dti.is_quarter_start[0], True)
self.assertEqual(dti.is_quarter_start[90], True)
self.assertEqual(dti.is_year_start[0], True)
self.assertEqual(dti.is_year_start[364], False)
self.assertEqual(dti.is_month_end[0], False)
self.assertEqual(dti.is_month_end[30], True)
self.assertEqual(dti.is_month_end[31], False)
self.assertEqual(dti.is_month_end[364], True)
self.assertEqual(dti.is_quarter_end[0], False)
self.assertEqual(dti.is_quarter_end[30], False)
self.assertEqual(dti.is_quarter_end[89], True)
self.assertEqual(dti.is_quarter_end[364], True)
self.assertEqual(dti.is_year_end[0], False)
self.assertEqual(dti.is_year_end[364], True)
self.assertEqual(len(dti.year), 365)
self.assertEqual(len(dti.month), 365)
self.assertEqual(len(dti.day), 365)
self.assertEqual(len(dti.hour), 365)
self.assertEqual(len(dti.minute), 365)
self.assertEqual(len(dti.second), 365)
self.assertEqual(len(dti.microsecond), 365)
self.assertEqual(len(dti.dayofweek), 365)
self.assertEqual(len(dti.dayofyear), 365)
self.assertEqual(len(dti.weekofyear), 365)
self.assertEqual(len(dti.quarter), 365)
self.assertEqual(len(dti.is_month_start), 365)
self.assertEqual(len(dti.is_month_end), 365)
self.assertEqual(len(dti.is_quarter_start), 365)
self.assertEqual(len(dti.is_quarter_end), 365)
self.assertEqual(len(dti.is_year_start), 365)
self.assertEqual(len(dti.is_year_end), 365)
dti = DatetimeIndex(
freq='BQ-FEB', start=datetime(1998, 1, 1), periods=4)
self.assertEqual(sum(dti.is_quarter_start), 0)
self.assertEqual(sum(dti.is_quarter_end), 4)
self.assertEqual(sum(dti.is_year_start), 0)
self.assertEqual(sum(dti.is_year_end), 1)
# Ensure is_start/end accessors throw ValueError for CustomBusinessDay, CBD requires np >= 1.7
if not _np_version_under1p7:
bday_egypt = offsets.CustomBusinessDay(weekmask='Sun Mon Tue Wed Thu')
dti = date_range(datetime(2013, 4, 30), periods=5, freq=bday_egypt)
self.assertRaises(ValueError, lambda: dti.is_month_start)
dti = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'])
self.assertEqual(dti.is_month_start[0], 1)
tests = [
(Timestamp('2013-06-01', offset='M').is_month_start, 1),
(Timestamp('2013-06-01', offset='BM').is_month_start, 0),
(Timestamp('2013-06-03', offset='M').is_month_start, 0),
(Timestamp('2013-06-03', offset='BM').is_month_start, 1),
(Timestamp('2013-02-28', offset='Q-FEB').is_month_end, 1),
(Timestamp('2013-02-28', offset='Q-FEB').is_quarter_end, 1),
(Timestamp('2013-02-28', offset='Q-FEB').is_year_end, 1),
(Timestamp('2013-03-01', offset='Q-FEB').is_month_start, 1),
(Timestamp('2013-03-01', offset='Q-FEB').is_quarter_start, 1),
(Timestamp('2013-03-01', offset='Q-FEB').is_year_start, 1),
(Timestamp('2013-03-31', offset='QS-FEB').is_month_end, 1),
(Timestamp('2013-03-31', offset='QS-FEB').is_quarter_end, 0),
(Timestamp('2013-03-31', offset='QS-FEB').is_year_end, 0),
(Timestamp('2013-02-01', offset='QS-FEB').is_month_start, 1),
(Timestamp('2013-02-01', offset='QS-FEB').is_quarter_start, 1),
(Timestamp('2013-02-01', offset='QS-FEB').is_year_start, 1),
(Timestamp('2013-06-30', offset='BQ').is_month_end, 0),
(Timestamp('2013-06-30', offset='BQ').is_quarter_end, 0),
(Timestamp('2013-06-30', offset='BQ').is_year_end, 0),
(Timestamp('2013-06-28', offset='BQ').is_month_end, 1),
(Timestamp('2013-06-28', offset='BQ').is_quarter_end, 1),
(Timestamp('2013-06-28', offset='BQ').is_year_end, 0),
(Timestamp('2013-06-30', offset='BQS-APR').is_month_end, 0),
(Timestamp('2013-06-30', offset='BQS-APR').is_quarter_end, 0),
(Timestamp('2013-06-30', offset='BQS-APR').is_year_end, 0),
(Timestamp('2013-06-28', offset='BQS-APR').is_month_end, 1),
(Timestamp('2013-06-28', offset='BQS-APR').is_quarter_end, 1),
(Timestamp('2013-03-29', offset='BQS-APR').is_year_end, 1),
(Timestamp('2013-11-01', offset='AS-NOV').is_year_start, 1),
(Timestamp('2013-10-31', offset='AS-NOV').is_year_end, 1)]
for ts, value in tests:
self.assertEqual(ts, value)
def test_nanosecond_field(self):
dti = DatetimeIndex(np.arange(10))
self.assert_numpy_array_equal(dti.nanosecond, np.arange(10))
def test_datetimeindex_diff(self):
dti1 = DatetimeIndex(freq='Q-JAN', start=datetime(1997, 12, 31),
periods=100)
dti2 = DatetimeIndex(freq='Q-JAN', start=datetime(1997, 12, 31),
periods=98)
self.assertEqual(len(dti1.diff(dti2)), 2)
def test_fancy_getitem(self):
dti = DatetimeIndex(freq='WOM-1FRI', start=datetime(2005, 1, 1),
end=datetime(2010, 1, 1))
s = Series(np.arange(len(dti)), index=dti)
self.assertEqual(s[48], 48)
self.assertEqual(s['1/2/2009'], 48)
self.assertEqual(s['2009-1-2'], 48)
self.assertEqual(s[datetime(2009, 1, 2)], 48)
self.assertEqual(s[lib.Timestamp(datetime(2009, 1, 2))], 48)
self.assertRaises(KeyError, s.__getitem__, '2009-1-3')
assert_series_equal(s['3/6/2009':'2009-06-05'],
s[datetime(2009, 3, 6):datetime(2009, 6, 5)])
def test_fancy_setitem(self):
dti = DatetimeIndex(freq='WOM-1FRI', start=datetime(2005, 1, 1),
end=datetime(2010, 1, 1))
s = Series(np.arange(len(dti)), index=dti)
s[48] = -1
self.assertEqual(s[48], -1)
s['1/2/2009'] = -2
self.assertEqual(s[48], -2)
s['1/2/2009':'2009-06-05'] = -3
self.assertTrue((s[48:54] == -3).all())
def test_datetimeindex_constructor(self):
arr = ['1/1/2005', '1/2/2005', 'Jn 3, 2005', '2005-01-04']
self.assertRaises(Exception, DatetimeIndex, arr)
arr = ['1/1/2005', '1/2/2005', '1/3/2005', '2005-01-04']
idx1 = DatetimeIndex(arr)
arr = [datetime(2005, 1, 1), '1/2/2005', '1/3/2005', '2005-01-04']
idx2 = DatetimeIndex(arr)
arr = [lib.Timestamp(datetime(2005, 1, 1)), '1/2/2005', '1/3/2005',
'2005-01-04']
idx3 = DatetimeIndex(arr)
arr = np.array(['1/1/2005', '1/2/2005', '1/3/2005',
'2005-01-04'], dtype='O')
idx4 = DatetimeIndex(arr)
arr = to_datetime(['1/1/2005', '1/2/2005', '1/3/2005', '2005-01-04'])
idx5 = DatetimeIndex(arr)
arr = to_datetime(
['1/1/2005', '1/2/2005', 'Jan 3, 2005', '2005-01-04'])
idx6 = DatetimeIndex(arr)
idx7 = DatetimeIndex(['12/05/2007', '25/01/2008'], dayfirst=True)
idx8 = DatetimeIndex(['2007/05/12', '2008/01/25'], dayfirst=False,
yearfirst=True)
self.assertTrue(idx7.equals(idx8))
for other in [idx2, idx3, idx4, idx5, idx6]:
self.assertTrue((idx1.values == other.values).all())
sdate = datetime(1999, 12, 25)
edate = datetime(2000, 1, 1)
idx = DatetimeIndex(start=sdate, freq='1B', periods=20)
self.assertEqual(len(idx), 20)
self.assertEqual(idx[0], sdate + 0 * dt.bday)
self.assertEqual(idx.freq, 'B')
idx = DatetimeIndex(end=edate, freq=('D', 5), periods=20)
self.assertEqual(len(idx), 20)
self.assertEqual(idx[-1], edate)
self.assertEqual(idx.freq, '5D')
idx1 = DatetimeIndex(start=sdate, end=edate, freq='W-SUN')
idx2 = DatetimeIndex(start=sdate, end=edate,
freq=dt.Week(weekday=6))
self.assertEqual(len(idx1), len(idx2))
self.assertEqual(idx1.offset, idx2.offset)
idx1 = DatetimeIndex(start=sdate, end=edate, freq='QS')
idx2 = DatetimeIndex(start=sdate, end=edate,
freq=dt.QuarterBegin(startingMonth=1))
self.assertEqual(len(idx1), len(idx2))
self.assertEqual(idx1.offset, idx2.offset)
idx1 = DatetimeIndex(start=sdate, end=edate, freq='BQ')
idx2 = DatetimeIndex(start=sdate, end=edate,
freq=dt.BQuarterEnd(startingMonth=12))
self.assertEqual(len(idx1), len(idx2))
self.assertEqual(idx1.offset, idx2.offset)
def test_dayfirst(self):
# GH 5917
arr = ['10/02/2014', '11/02/2014', '12/02/2014']
expected = DatetimeIndex([datetime(2014, 2, 10),
datetime(2014, 2, 11),
datetime(2014, 2, 12)])
idx1 = DatetimeIndex(arr, dayfirst=True)
idx2 = DatetimeIndex(np.array(arr), dayfirst=True)
idx3 = to_datetime(arr, dayfirst=True)
idx4 = to_datetime(np.array(arr), dayfirst=True)
idx5 = DatetimeIndex(Index(arr), dayfirst=True)
idx6 = DatetimeIndex(Series(arr), dayfirst=True)
self.assertTrue(expected.equals(idx1))
self.assertTrue(expected.equals(idx2))
self.assertTrue(expected.equals(idx3))
self.assertTrue(expected.equals(idx4))
self.assertTrue(expected.equals(idx5))
self.assertTrue(expected.equals(idx6))
def test_dti_snap(self):
dti = DatetimeIndex(['1/1/2002', '1/2/2002', '1/3/2002', '1/4/2002',
'1/5/2002', '1/6/2002', '1/7/2002'], freq='D')
res = dti.snap(freq='W-MON')
exp = date_range('12/31/2001', '1/7/2002', freq='w-mon')
exp = exp.repeat([3, 4])
self.assertTrue((res == exp).all())
res = dti.snap(freq='B')
exp = date_range('1/1/2002', '1/7/2002', freq='b')
exp = exp.repeat([1, 1, 1, 2, 2])
self.assertTrue((res == exp).all())
def test_dti_reset_index_round_trip(self):
dti = DatetimeIndex(start='1/1/2001', end='6/1/2001', freq='D')
d1 = DataFrame({'v': np.random.rand(len(dti))}, index=dti)
d2 = d1.reset_index()
self.assertEqual(d2.dtypes[0], np.dtype('M8[ns]'))
d3 = d2.set_index('index')
assert_frame_equal(d1, d3, check_names=False)
# #2329
stamp = datetime(2012, 11, 22)
df = DataFrame([[stamp, 12.1]], columns=['Date', 'Value'])
df = df.set_index('Date')
self.assertEqual(df.index[0], stamp)
self.assertEqual(df.reset_index()['Date'][0], stamp)
def test_dti_set_index_reindex(self):
# GH 6631
df = DataFrame(np.random.random(6))
idx1 = date_range('2011/01/01', periods=6, freq='M', tz='US/Eastern')
idx2 = date_range('2013', periods=6, freq='A', tz='Asia/Tokyo')
df = df.set_index(idx1)
self.assertTrue(df.index.equals(idx1))
df = df.reindex(idx2)
self.assertTrue(df.index.equals(idx2))
def test_datetimeindex_union_join_empty(self):
dti = DatetimeIndex(start='1/1/2001', end='2/1/2001', freq='D')
empty = Index([])
result = dti.union(empty)
tm.assert_isinstance(result, DatetimeIndex)
self.assertIs(result, result)
result = dti.join(empty)
tm.assert_isinstance(result, DatetimeIndex)
def test_series_set_value(self):
# #1561
dates = [datetime(2001, 1, 1), datetime(2001, 1, 2)]
index = DatetimeIndex(dates)
s = Series().set_value(dates[0], 1.)
s2 = s.set_value(dates[1], np.nan)
exp = Series([1., np.nan], index=index)
assert_series_equal(s2, exp)
# s = Series(index[:1], index[:1])
# s2 = s.set_value(dates[1], index[1])
# self.assertEqual(s2.values.dtype, 'M8[ns]')
@slow
def test_slice_locs_indexerror(self):
times = [datetime(2000, 1, 1) + timedelta(minutes=i * 10)
for i in range(100000)]
s = Series(lrange(100000), times)
s.ix[datetime(1900, 1, 1):datetime(2100, 1, 1)]
class TestSeriesDatetime64(tm.TestCase):
def setUp(self):
self.series = Series(date_range('1/1/2000', periods=10))
def test_auto_conversion(self):
series = Series(list(date_range('1/1/2000', periods=10)))
self.assertEqual(series.dtype, 'M8[ns]')
def test_constructor_cant_cast_datetime64(self):
self.assertRaises(TypeError, Series,
date_range('1/1/2000', periods=10), dtype=float)
def test_series_comparison_scalars(self):
val = datetime(2000, 1, 4)
result = self.series > val
expected = np.array([x > val for x in self.series])
self.assert_numpy_array_equal(result, expected)
val = self.series[5]
result = self.series > val
expected = np.array([x > val for x in self.series])
self.assert_numpy_array_equal(result, expected)
def test_between(self):
left, right = self.series[[2, 7]]
result = self.series.between(left, right)
expected = (self.series >= left) & (self.series <= right)
assert_series_equal(result, expected)
#----------------------------------------------------------------------
# NaT support
def test_NaT_scalar(self):
series = Series([0, 1000, 2000, iNaT], dtype='M8[ns]')
val = series[3]
self.assertTrue(com.isnull(val))
series[2] = val
self.assertTrue(com.isnull(series[2]))
def test_set_none_nan(self):
self.series[3] = None
self.assertIs(self.series[3], NaT)
self.series[3:5] = None
self.assertIs(self.series[4], NaT)
self.series[5] = np.nan
self.assertIs(self.series[5], NaT)
self.series[5:7] = np.nan
self.assertIs(self.series[6], NaT)
def test_intercept_astype_object(self):
# this test no longer makes sense as series is by default already M8[ns]
expected = self.series.astype('object')
df = DataFrame({'a': self.series,
'b': np.random.randn(len(self.series))})
result = df.values.squeeze()
self.assertTrue((result[:, 0] == expected.values).all())
df = DataFrame({'a': self.series,
'b': ['foo'] * len(self.series)})
result = df.values.squeeze()
self.assertTrue((result[:, 0] == expected.values).all())
def test_union(self):
rng1 = date_range('1/1/1999', '1/1/2012', freq='MS')
s1 = Series(np.random.randn(len(rng1)), rng1)
rng2 = date_range('1/1/1980', '12/1/2001', freq='MS')
s2 = Series(np.random.randn(len(rng2)), rng2)
df = DataFrame({'s1': s1, 's2': s2})
self.assertEqual(df.index.values.dtype, np.dtype('M8[ns]'))
def test_intersection(self):
rng = date_range('6/1/2000', '6/15/2000', freq='D')
rng = rng.delete(5)
rng2 = date_range('5/15/2000', '6/20/2000', freq='D')
rng2 = DatetimeIndex(rng2.values)
result = rng.intersection(rng2)
self.assertTrue(result.equals(rng))
# empty same freq GH2129
rng = date_range('6/1/2000', '6/15/2000', freq='T')
result = rng[0:0].intersection(rng)
self.assertEqual(len(result), 0)
result = rng.intersection(rng[0:0])
self.assertEqual(len(result), 0)
def test_date_range_bms_bug(self):
# #1645
rng = date_range('1/1/2000', periods=10, freq='BMS')
ex_first = Timestamp('2000-01-03')
self.assertEqual(rng[0], ex_first)
def test_string_index_series_name_converted(self):
# #1644
df = DataFrame(np.random.randn(10, 4),
index=date_range('1/1/2000', periods=10))
result = df.ix['1/3/2000']
self.assertEqual(result.name, df.index[2])
result = df.T['1/3/2000']
self.assertEqual(result.name, df.index[2])
class TestTimestamp(tm.TestCase):
def test_class_ops(self):
_skip_if_no_pytz()
import pytz
def compare(x,y):
self.assertEqual(int(Timestamp(x).value/1e9), int(Timestamp(y).value/1e9))
compare(Timestamp.now(),datetime.now())
compare(Timestamp.now('UTC'),datetime.now(pytz.timezone('UTC')))
compare(Timestamp.utcnow(),datetime.utcnow())
compare(Timestamp.today(),datetime.today())
def test_basics_nanos(self):
val = np.int64(946684800000000000).view('M8[ns]')
stamp = Timestamp(val.view('i8') + 500)
self.assertEqual(stamp.year, 2000)
self.assertEqual(stamp.month, 1)
self.assertEqual(stamp.microsecond, 0)
self.assertEqual(stamp.nanosecond, 500)
def test_unit(self):
def check(val,unit=None,h=1,s=1,us=0):
stamp = Timestamp(val, unit=unit)
self.assertEqual(stamp.year, 2000)
self.assertEqual(stamp.month, 1)
self.assertEqual(stamp.day, 1)
self.assertEqual(stamp.hour, h)
if unit != 'D':
self.assertEqual(stamp.minute, 1)
self.assertEqual(stamp.second, s)
self.assertEqual(stamp.microsecond, us)
else:
self.assertEqual(stamp.minute, 0)
self.assertEqual(stamp.second, 0)
self.assertEqual(stamp.microsecond, 0)
self.assertEqual(stamp.nanosecond, 0)
ts = Timestamp('20000101 01:01:01')
val = ts.value
days = (ts - Timestamp('1970-01-01')).days
check(val)
check(val/long(1000),unit='us')
check(val/long(1000000),unit='ms')
check(val/long(1000000000),unit='s')
check(days,unit='D',h=0)
# using truediv, so these are like floats
if compat.PY3:
check((val+500000)/long(1000000000),unit='s',us=500)
check((val+500000000)/long(1000000000),unit='s',us=500000)
check((val+500000)/long(1000000),unit='ms',us=500)
# get chopped in py2
else:
check((val+500000)/long(1000000000),unit='s')
check((val+500000000)/long(1000000000),unit='s')
check((val+500000)/long(1000000),unit='ms')
# ok
check((val+500000)/long(1000),unit='us',us=500)
check((val+500000000)/long(1000000),unit='ms',us=500000)
# floats
check(val/1000.0 + 5,unit='us',us=5)
check(val/1000.0 + 5000,unit='us',us=5000)
check(val/1000000.0 + 0.5,unit='ms',us=500)
check(val/1000000.0 + 0.005,unit='ms',us=5)
check(val/1000000000.0 + 0.5,unit='s',us=500000)
check(days + 0.5,unit='D',h=12)
# nan
result = Timestamp(np.nan)
self.assertIs(result, NaT)
result = Timestamp(None)
self.assertIs(result, NaT)
result = Timestamp(iNaT)
self.assertIs(result, NaT)
result = Timestamp(NaT)
self.assertIs(result, NaT)
def test_comparison(self):
# 5-18-2012 00:00:00.000
stamp = long(1337299200000000000)
val = Timestamp(stamp)
self.assertEqual(val, val)
self.assertFalse(val != val)
self.assertFalse(val < val)
self.assertTrue(val <= val)
self.assertFalse(val > val)
self.assertTrue(val >= val)
other = datetime(2012, 5, 18)
self.assertEqual(val, other)
self.assertFalse(val != other)
self.assertFalse(val < other)
self.assertTrue(val <= other)
self.assertFalse(val > other)
self.assertTrue(val >= other)
other = Timestamp(stamp + 100)
self.assertNotEqual(val, other)
self.assertNotEqual(val, other)
self.assertTrue(val < other)
self.assertTrue(val <= other)
self.assertTrue(other > val)
self.assertTrue(other >= val)
def test_cant_compare_tz_naive_w_aware(self):
_skip_if_no_pytz()
# #1404
a = Timestamp('3/12/2012')
b = Timestamp('3/12/2012', tz='utc')
self.assertRaises(Exception, a.__eq__, b)
self.assertRaises(Exception, a.__ne__, b)
self.assertRaises(Exception, a.__lt__, b)
self.assertRaises(Exception, a.__gt__, b)
self.assertRaises(Exception, b.__eq__, a)
self.assertRaises(Exception, b.__ne__, a)
self.assertRaises(Exception, b.__lt__, a)
self.assertRaises(Exception, b.__gt__, a)
if sys.version_info < (3, 3):
self.assertRaises(Exception, a.__eq__, b.to_pydatetime())
self.assertRaises(Exception, a.to_pydatetime().__eq__, b)
else:
self.assertFalse(a == b.to_pydatetime())
self.assertFalse(a.to_pydatetime() == b)
def test_delta_preserve_nanos(self):
val = Timestamp(long(1337299200000000123))
result = val + timedelta(1)
self.assertEqual(result.nanosecond, val.nanosecond)
def test_frequency_misc(self):
self.assertEqual(fmod.get_freq_group('T'),
fmod.FreqGroup.FR_MIN)
code, stride = fmod.get_freq_code(offsets.Hour())
self.assertEqual(code, fmod.FreqGroup.FR_HR)
code, stride = fmod.get_freq_code((5, 'T'))
self.assertEqual(code, fmod.FreqGroup.FR_MIN)
self.assertEqual(stride, 5)
offset = offsets.Hour()
result = fmod.to_offset(offset)
self.assertEqual(result, offset)
result = fmod.to_offset((5, 'T'))
expected = offsets.Minute(5)
self.assertEqual(result, expected)
self.assertRaises(ValueError, fmod.get_freq_code, (5, 'baz'))
self.assertRaises(ValueError, fmod.to_offset, '100foo')
self.assertRaises(ValueError, fmod.to_offset, ('', ''))
result = fmod.get_standard_freq(offsets.Hour())
self.assertEqual(result, 'H')
def test_hash_equivalent(self):
d = {datetime(2011, 1, 1): 5}
stamp = Timestamp(datetime(2011, 1, 1))
self.assertEqual(d[stamp], 5)
def test_timestamp_compare_scalars(self):
# case where ndim == 0
lhs = np.datetime64(datetime(2013, 12, 6))
rhs = Timestamp('now')
nat = Timestamp('nat')
ops = {'gt': 'lt', 'lt': 'gt', 'ge': 'le', 'le': 'ge', 'eq': 'eq',
'ne': 'ne'}
for left, right in ops.items():
left_f = getattr(operator, left)
right_f = getattr(operator, right)
if pd._np_version_under1p7:
# you have to convert to timestamp for this to work with numpy
# scalars
expected = left_f(Timestamp(lhs), rhs)
# otherwise a TypeError is thrown
if left not in ('eq', 'ne'):
with tm.assertRaises(TypeError):
left_f(lhs, rhs)
else:
expected = left_f(lhs, rhs)
result = right_f(rhs, lhs)
self.assertEqual(result, expected)
expected = left_f(rhs, nat)
result = right_f(nat, rhs)
self.assertEqual(result, expected)
def test_timestamp_compare_series(self):
# make sure we can compare Timestamps on the right AND left hand side
# GH4982
s = Series(date_range('20010101', periods=10), name='dates')
s_nat = s.copy(deep=True)
s[0] = pd.Timestamp('nat')
s[3] = pd.Timestamp('nat')
ops = {'lt': 'gt', 'le': 'ge', 'eq': 'eq', 'ne': 'ne'}
for left, right in ops.items():
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# no nats
expected = left_f(s, Timestamp('20010109'))
result = right_f(Timestamp('20010109'), s)
tm.assert_series_equal(result, expected)
# nats
expected = left_f(s, Timestamp('nat'))
result = right_f(Timestamp('nat'), s)
tm.assert_series_equal(result, expected)
# compare to timestamp with series containing nats
expected = left_f(s_nat, Timestamp('20010109'))
result = right_f(Timestamp('20010109'), s_nat)
tm.assert_series_equal(result, expected)
# compare to nat with series containing nats
expected = left_f(s_nat, Timestamp('nat'))
result = right_f(Timestamp('nat'), s_nat)
tm.assert_series_equal(result, expected)
class TestSlicing(tm.TestCase):
def test_slice_year(self):
dti = DatetimeIndex(freq='B', start=datetime(2005, 1, 1), periods=500)
s = Series(np.arange(len(dti)), index=dti)
result = s['2005']
expected = s[s.index.year == 2005]
assert_series_equal(result, expected)
df = DataFrame(np.random.rand(len(dti), 5), index=dti)
result = df.ix['2005']
expected = df[df.index.year == 2005]
assert_frame_equal(result, expected)
rng = date_range('1/1/2000', '1/1/2010')
result = rng.get_loc('2009')
expected = slice(3288, 3653)
self.assertEqual(result, expected)
def test_slice_quarter(self):
dti = DatetimeIndex(freq='D', start=datetime(2000, 6, 1), periods=500)
s = Series(np.arange(len(dti)), index=dti)
self.assertEqual(len(s['2001Q1']), 90)
df = DataFrame(np.random.rand(len(dti), 5), index=dti)
self.assertEqual(len(df.ix['1Q01']), 90)
def test_slice_month(self):
dti = DatetimeIndex(freq='D', start=datetime(2005, 1, 1), periods=500)
s = Series(np.arange(len(dti)), index=dti)
self.assertEqual(len(s['2005-11']), 30)
df = DataFrame(np.random.rand(len(dti), 5), index=dti)
self.assertEqual(len(df.ix['2005-11']), 30)
assert_series_equal(s['2005-11'], s['11-2005'])
def test_partial_slice(self):
rng = DatetimeIndex(freq='D', start=datetime(2005, 1, 1), periods=500)
s = Series(np.arange(len(rng)), index=rng)
result = s['2005-05':'2006-02']
expected = s['20050501':'20060228']
assert_series_equal(result, expected)
result = s['2005-05':]
expected = s['20050501':]
assert_series_equal(result, expected)
result = s[:'2006-02']
expected = s[:'20060228']
assert_series_equal(result, expected)
result = s['2005-1-1']
self.assertEqual(result, s.irow(0))
self.assertRaises(Exception, s.__getitem__, '2004-12-31')
def test_partial_slice_daily(self):
rng = DatetimeIndex(freq='H', start=datetime(2005, 1, 31), periods=500)
s = Series(np.arange(len(rng)), index=rng)
result = s['2005-1-31']
assert_series_equal(result, s.ix[:24])
self.assertRaises(Exception, s.__getitem__, '2004-12-31 00')
def test_partial_slice_hourly(self):
rng = DatetimeIndex(freq='T', start=datetime(2005, 1, 1, 20, 0, 0),
periods=500)
s = Series(np.arange(len(rng)), index=rng)
result = s['2005-1-1']
assert_series_equal(result, s.ix[:60 * 4])
result = s['2005-1-1 20']
assert_series_equal(result, s.ix[:60])
self.assertEqual(s['2005-1-1 20:00'], s.ix[0])
self.assertRaises(Exception, s.__getitem__, '2004-12-31 00:15')
def test_partial_slice_minutely(self):
rng = DatetimeIndex(freq='S', start=datetime(2005, 1, 1, 23, 59, 0),
periods=500)
s = Series(np.arange(len(rng)), index=rng)
result = s['2005-1-1 23:59']
assert_series_equal(result, s.ix[:60])
result = s['2005-1-1']
assert_series_equal(result, s.ix[:60])
self.assertEqual(s[Timestamp('2005-1-1 23:59:00')], s.ix[0])
self.assertRaises(Exception, s.__getitem__, '2004-12-31 00:00:00')
def test_partial_slicing_with_multiindex(self):
# GH 4758
# partial string indexing with a multi-index buggy
df = DataFrame({'ACCOUNT':["ACCT1", "ACCT1", "ACCT1", "ACCT2"],
'TICKER':["ABC", "MNP", "XYZ", "XYZ"],
'val':[1,2,3,4]},
index=date_range("2013-06-19 09:30:00", periods=4, freq='5T'))
df_multi = df.set_index(['ACCOUNT', 'TICKER'], append=True)
expected = DataFrame([[1]],index=Index(['ABC'],name='TICKER'),columns=['val'])
result = df_multi.loc[('2013-06-19 09:30:00', 'ACCT1')]
assert_frame_equal(result, expected)
expected = df_multi.loc[(pd.Timestamp('2013-06-19 09:30:00', tz=None), 'ACCT1', 'ABC')]
result = df_multi.loc[('2013-06-19 09:30:00', 'ACCT1', 'ABC')]
assert_series_equal(result, expected)
# this is a KeyError as we don't do partial string selection on multi-levels
def f():
df_multi.loc[('2013-06-19', 'ACCT1', 'ABC')]
self.assertRaises(KeyError, f)
# GH 4294
# partial slice on a series mi
s = pd.DataFrame(randn(1000, 1000), index=pd.date_range('2000-1-1', periods=1000)).stack()
s2 = s[:-1].copy()
expected = s2['2000-1-4']
result = s2[pd.Timestamp('2000-1-4')]
assert_series_equal(result, expected)
result = s[pd.Timestamp('2000-1-4')]
expected = s['2000-1-4']
assert_series_equal(result, expected)
df2 = pd.DataFrame(s)
expected = df2.ix['2000-1-4']
result = df2.ix[pd.Timestamp('2000-1-4')]
assert_frame_equal(result, expected)
def test_date_range_normalize(self):
snap = datetime.today()
n = 50
rng = date_range(snap, periods=n, normalize=False, freq='2D')
offset = timedelta(2)
values = np.array([snap + i * offset for i in range(n)],
dtype='M8[ns]')
self.assert_numpy_array_equal(rng, values)
rng = date_range(
'1/1/2000 08:15', periods=n, normalize=False, freq='B')
the_time = time(8, 15)
for val in rng:
self.assertEqual(val.time(), the_time)
def test_timedelta(self):
# this is valid too
index = date_range('1/1/2000', periods=50, freq='B')
shifted = index + timedelta(1)
back = shifted + timedelta(-1)
self.assertTrue(tm.equalContents(index, back))
self.assertEqual(shifted.freq, index.freq)
self.assertEqual(shifted.freq, back.freq)
result = index - timedelta(1)
expected = index + timedelta(-1)
self.assertTrue(result.equals(expected))
# GH4134, buggy with timedeltas
rng = date_range('2013', '2014')
s = Series(rng)
result1 = rng - pd.offsets.Hour(1)
result2 = DatetimeIndex(s - np.timedelta64(100000000))
result3 = rng - np.timedelta64(100000000)
result4 = DatetimeIndex(s - pd.offsets.Hour(1))
self.assertTrue(result1.equals(result4))
self.assertTrue(result2.equals(result3))
def test_shift(self):
ts = Series(np.random.randn(5),
index=date_range('1/1/2000', periods=5, freq='H'))
result = ts.shift(1, freq='5T')
exp_index = ts.index.shift(1, freq='5T')
self.assertTrue(result.index.equals(exp_index))
# GH #1063, multiple of same base
result = ts.shift(1, freq='4H')
exp_index = ts.index + datetools.Hour(4)
self.assertTrue(result.index.equals(exp_index))
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-04'])
self.assertRaises(ValueError, idx.shift, 1)
def test_setops_preserve_freq(self):
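        # Union of overlapping or adjacent slices keeps the daily freq; a gap
        # (rng[:50] with rng[60:100]) drops it to None, while set ops against a
        # freq-less index built from the same points still report freq='D'.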
rng = date_range('1/1/2000', '1/1/2002')
result = rng[:50].union(rng[50:100])
self.assertEqual(result.freq, rng.freq)
result = rng[:50].union(rng[30:100])
self.assertEqual(result.freq, rng.freq)
result = rng[:50].union(rng[60:100])
self.assertIsNone(result.freq)
result = rng[:50].intersection(rng[25:75])
self.assertEqual(result.freqstr, 'D')
nofreq = DatetimeIndex(list(rng[25:75]))
result = rng[:50].union(nofreq)
self.assertEqual(result.freq, rng.freq)
result = rng[:50].intersection(nofreq)
self.assertEqual(result.freq, rng.freq)
def test_min_max(self):
rng = date_range('1/1/2000', '12/31/2000')
rng2 = rng.take(np.random.permutation(len(rng)))
the_min = rng2.min()
the_max = rng2.max()
tm.assert_isinstance(the_min, Timestamp)
tm.assert_isinstance(the_max, Timestamp)
self.assertEqual(the_min, rng[0])
self.assertEqual(the_max, rng[-1])
self.assertEqual(rng.min(), rng[0])
self.assertEqual(rng.max(), rng[-1])
def test_min_max_series(self):
rng = date_range('1/1/2000', periods=10, freq='4h')
lvls = ['A', 'A', 'A', 'B', 'B', 'B', 'C', 'C', 'C', 'C']
df = DataFrame({'TS': rng, 'V': np.random.randn(len(rng)),
'L': lvls})
result = df.TS.max()
exp = Timestamp(df.TS.iget(-1))
self.assertTrue(isinstance(result, Timestamp))
self.assertEqual(result, exp)
result = df.TS.min()
exp = Timestamp(df.TS.iget(0))
self.assertTrue(isinstance(result, Timestamp))
self.assertEqual(result, exp)
def test_from_M8_structured(self):
dates = [(datetime(2012, 9, 9, 0, 0),
datetime(2012, 9, 8, 15, 10))]
arr = np.array(dates,
dtype=[('Date', 'M8[us]'), ('Forecasting', 'M8[us]')])
df = DataFrame(arr)
self.assertEqual(df['Date'][0], dates[0][0])
self.assertEqual(df['Forecasting'][0], dates[0][1])
s = Series(arr['Date'])
        tm.assert_isinstance(s[0], Timestamp)
self.assertEqual(s[0], dates[0][0])
s = Series.from_array(arr['Date'], Index([0]))
self.assertEqual(s[0], dates[0][0])
def test_get_level_values_box(self):
from pandas import MultiIndex
dates = date_range('1/1/2000', periods=4)
levels = [dates, [0, 1]]
labels = [[0, 0, 1, 1, 2, 2, 3, 3],
[0, 1, 0, 1, 0, 1, 0, 1]]
index = MultiIndex(levels=levels, labels=labels)
self.assertTrue(isinstance(index.get_level_values(0)[0], Timestamp))
def test_frame_apply_dont_convert_datetime64(self):
from pandas.tseries.offsets import BDay
df = DataFrame({'x1': [datetime(1996, 1, 1)]})
df = df.applymap(lambda x: x + BDay())
df = df.applymap(lambda x: x + BDay())
self.assertTrue(df.x1.dtype == 'M8[ns]')
def test_date_range_fy5252(self):
dr = date_range(start="2013-01-01",
periods=2,
freq=offsets.FY5253(startingMonth=1,
weekday=3,
variation="nearest"))
self.assertEqual(dr[0], Timestamp('2013-01-31'))
self.assertEqual(dr[1], Timestamp('2014-01-30'))
class TimeConversionFormats(tm.TestCase):
def test_to_datetime_format(self):
values = ['1/1/2000', '1/2/2000', '1/3/2000']
results1 = [ Timestamp('20000101'), Timestamp('20000201'),
Timestamp('20000301') ]
results2 = [ Timestamp('20000101'), Timestamp('20000102'),
Timestamp('20000103') ]
for vals, expecteds in [ (values, (Index(results1), Index(results2))),
(Series(values),(Series(results1), Series(results2))),
(values[0], (results1[0], results2[0])),
(values[1], (results1[1], results2[1])),
(values[2], (results1[2], results2[2])) ]:
for i, fmt in enumerate(['%d/%m/%Y', '%m/%d/%Y']):
result = to_datetime(vals, format=fmt)
expected = expecteds[i]
if isinstance(expected, Series):
assert_series_equal(result, Series(expected))
elif isinstance(expected, Timestamp):
self.assertEqual(result, expected)
else:
self.assertTrue(result.equals(expected))
def test_to_datetime_format_YYYYMMDD(self):
s = Series([19801222,19801222] + [19810105]*5)
expected = Series([ Timestamp(x) for x in s.apply(str) ])
result = to_datetime(s,format='%Y%m%d')
assert_series_equal(result, expected)
result = to_datetime(s.apply(str),format='%Y%m%d')
assert_series_equal(result, expected)
# with NaT
expected = Series([Timestamp("19801222"),Timestamp("19801222")] + [Timestamp("19810105")]*5)
expected[2] = np.nan
s[2] = np.nan
result = to_datetime(s,format='%Y%m%d')
assert_series_equal(result, expected)
# string with NaT
s = s.apply(str)
s[2] = 'nat'
result = to_datetime(s,format='%Y%m%d')
assert_series_equal(result, expected)
def test_to_datetime_format_microsecond(self):
val = '01-Apr-2011 00:00:01.978'
format = '%d-%b-%Y %H:%M:%S.%f'
result = to_datetime(val, format=format)
exp = dt.datetime.strptime(val, format)
self.assertEqual(result, exp)
def test_to_datetime_format_time(self):
data = [
['01/10/2010 15:20', '%m/%d/%Y %H:%M', Timestamp('2010-01-10 15:20')],
['01/10/2010 05:43', '%m/%d/%Y %I:%M', Timestamp('2010-01-10 05:43')],
['01/10/2010 13:56:01', '%m/%d/%Y %H:%M:%S', Timestamp('2010-01-10 13:56:01')]#,
#['01/10/2010 08:14 PM', '%m/%d/%Y %I:%M %p', Timestamp('2010-01-10 20:14')],
#['01/10/2010 07:40 AM', '%m/%d/%Y %I:%M %p', Timestamp('2010-01-10 07:40')],
#['01/10/2010 09:12:56 AM', '%m/%d/%Y %I:%M:%S %p', Timestamp('2010-01-10 09:12:56')]
]
for s, format, dt in data:
self.assertEqual(to_datetime(s, format=format), dt)
def test_to_datetime_format_weeks(self):
data = [
['2009324', '%Y%W%w', Timestamp('2009-08-13')],
['2013020', '%Y%U%w', Timestamp('2013-01-13')]
]
for s, format, dt in data:
self.assertEqual(to_datetime(s, format=format), dt)
class TestToDatetimeInferFormat(tm.TestCase):
def test_to_datetime_infer_datetime_format_consistent_format(self):
time_series = pd.Series(
pd.date_range('20000101', periods=50, freq='H')
)
test_formats = [
'%m-%d-%Y',
'%m/%d/%Y %H:%M:%S.%f',
'%Y-%m-%dT%H:%M:%S.%f',
]
for test_format in test_formats:
s_as_dt_strings = time_series.apply(
lambda x: x.strftime(test_format)
)
with_format = pd.to_datetime(s_as_dt_strings, format=test_format)
no_infer = pd.to_datetime(
s_as_dt_strings, infer_datetime_format=False
)
yes_infer = pd.to_datetime(
s_as_dt_strings, infer_datetime_format=True
)
# Whether the format is explicitly passed, it is inferred, or
# it is not inferred, the results should all be the same
self.assert_numpy_array_equal(with_format, no_infer)
self.assert_numpy_array_equal(no_infer, yes_infer)
def test_to_datetime_infer_datetime_format_inconsistent_format(self):
test_series = pd.Series(
np.array([
'01/01/2011 00:00:00',
'01-02-2011 00:00:00',
'2011-01-03T00:00:00',
]))
# When the format is inconsistent, infer_datetime_format should just
# fallback to the default parsing
self.assert_numpy_array_equal(
pd.to_datetime(test_series, infer_datetime_format=False),
pd.to_datetime(test_series, infer_datetime_format=True)
)
test_series = pd.Series(
np.array([
'Jan/01/2011',
'Feb/01/2011',
'Mar/01/2011',
]))
self.assert_numpy_array_equal(
pd.to_datetime(test_series, infer_datetime_format=False),
pd.to_datetime(test_series, infer_datetime_format=True)
)
def test_to_datetime_infer_datetime_format_series_with_nans(self):
test_series = pd.Series(
np.array([
'01/01/2011 00:00:00',
np.nan,
'01/03/2011 00:00:00',
np.nan,
]))
self.assert_numpy_array_equal(
| pd.to_datetime(test_series, infer_datetime_format=False) | pandas.to_datetime |
"""
By <NAME>
nickc1.github.io
Functions to query the NDBC (http://www.ndbc.noaa.gov/).
The realtime data for all of their buoys can be found at:
http://www.ndbc.noaa.gov/data/realtime2/
Info about all of NOAA's data can be found at:
http://www.ndbc.noaa.gov/docs/ndbc_web_data_guide.pdf
What all the values mean:
http://www.ndbc.noaa.gov/measdes.shtml
Each buoy has the data:
File Parameters
---- ----------
.data_spec Raw Spectral Wave Data
.ocean Oceanographic Data
.spec Spectral Wave Summary Data
.supl Supplemental Measurements Data
.swdir Spectral Wave Data (alpha1)
.swdir2 Spectral Wave Data (alpha2)
.swr1 Spectral Wave Data (r1)
.swr2 Spectral Wave Data (r2)
.txt Standard Meteorological Data
Example:
import buoypy as bp
# Get the last 45 days of data
rt = bp.realtime(41013) #frying pan shoals buoy
wave_data = rt.spec() #get the spectral wave summary data
wave_data.head()
Out[7]:
WVHT SwH SwP WWH WWP SwD WWD STEEPNESS APD MWD
2016-02-04 17:42:00 1.6 1.3 7.1 0.9 4.5 S S STEEP 5.3 169
2016-02-04 16:42:00 1.7 1.5 7.7 0.9 5.0 S S STEEP 5.4 174
2016-02-04 15:41:00 2.0 0.0 NaN 2.0 7.1 NaN S STEEP 5.3 174
2016-02-04 14:41:00 2.0 1.2 7.7 1.5 5.9 SSE SSE STEEP 5.5 167
2016-02-04 13:41:00 2.0 1.7 7.1 0.9 4.8 S SSE STEEP 5.7 175
TODO:
Make the functions that use try/except always return the same
column headings.
"""
import pandas as pd
import numpy as np
import datetime
class realtime:
def __init__(self, buoy):
self.link = 'http://www.ndbc.noaa.gov/data/realtime2/{}'.format(buoy)
def data_spec(self):
"""
        Get the raw spectral wave data from the buoy. The separation
frequency is dropped to keep the data clean.
Parameters
----------
buoy : string
Buoy number ex: '41013' is off wilmington, nc
Returns
-------
df : pandas dataframe (date, frequency)
data frame containing the raw spectral data. index is the date
and the columns are each of the frequencies
"""
link = "{}.{}".format(self.link, 'data_spec')
#combine the first five date columns YY MM DD hh mm and make index
df = pd.read_csv(link, delim_whitespace=True, skiprows=1, header=None,
parse_dates=[[0,1,2,3,4]], index_col=0)
#convert the dates to datetimes
df.index = pd.to_datetime(df.index,format="%Y %m %d %H %M")
specs = df.iloc[:,1::2]
freqs = df.iloc[0,2::2]
specs.columns=freqs
        #remove the parentheses from the column index
specs.columns = [cname.replace('(','').replace(')','')
for cname in specs.columns]
return specs
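    # Usage sketch for data_spec() (illustrative only; requires network access
    # and an actively reporting buoy id such as the hypothetical 41013):
    #   rt = realtime(41013)
    #   raw = rt.data_spec()
    #   raw.iloc[-1].astype(float).plot()  # most recent raw spectrum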
def ocean(self):
"""
Retrieve oceanic data. For the buoys explored,
O2%, O2PPM, CLCON, TURB, PH, EH were always NaNs
Returns
-------
df : pandas dataframe
Index is the date and columns are:
DEPTH m
OTMP degc
COND mS/cm
SAL PSU
O2% %
            O2PPM ppm
CLCON ug/l
TURB FTU
PH -
EH mv
"""
link = "{}.{}".format(self.link, 'ocean')
#combine the first five date columns YY MM DD hh mm and make index
df = pd.read_csv(link, delim_whitespace=True, na_values='MM',
parse_dates=[[0,1,2,3,4]], index_col=0)
#units are in the second row drop them
df.drop(df.index[0], inplace=True)
#convert the dates to datetimes
df.index = pd.to_datetime(df.index,format="%Y %m %d %H %M")
#convert to floats
cols = ['DEPTH','OTMP','COND','SAL']
df[cols] = df[cols].astype(float)
return df
def spec(self):
"""
Get the spectral wave data from the ndbc. Something is wrong with
the data for this parameter. The columns seem to change randomly.
Refreshing the data page will yield different column names from
minute to minute.
parameters
----------
buoy : string
            Buoy number, e.g. '41013' is off Wilmington, NC
Returns
-------
df : pandas dataframe
data frame containing the spectral data. index is the date
and the columns are:
            H0, SwH, SwP, WWH, WWP, SwD, WWD, STEEPNESS, AVP, MWD
OR
WVHT SwH SwP WWH WWP SwD WWD STEEPNESS APD MWD
"""
link = "{}.{}".format(self.link, 'spec')
#combine the first five date columns YY MM DD hh mm and make index
df = pd.read_csv(link, delim_whitespace=True, na_values='MM',
parse_dates=[[0,1,2,3,4]], index_col=0)
try:
#units are in the second row drop them
#df.columns = df.columns + '('+ df.iloc[0] + ')'
df.drop(df.index[0], inplace=True)
#convert the dates to datetimes
df.index = pd.to_datetime(df.index,format="%Y %m %d %H %M")
#convert to floats
cols = ['WVHT','SwH','SwP','WWH','WWP','APD','MWD']
df[cols] = df[cols].astype(float)
except:
#convert the dates to datetimes
df.index = pd.to_datetime(df.index,format="%Y %m %d %H %M")
#convert to floats
cols = ['H0','SwH','SwP','WWH','WWP','AVP','MWD']
df[cols] = df[cols].astype(float)
return df
def supl(self):
"""
Get supplemental data
Returns
-------
data frame containing the spectral data. index is the date
and the columns are:
PRES hpa
PTIME hhmm
WSPD m/s
WDIR degT
WTIME hhmm
"""
link = "{}.{}".format(self.link, 'supl')
#combine the first five date columns YY MM DD hh mm and make index
df = pd.read_csv(link, delim_whitespace=True, na_values='MM',
parse_dates=[[0,1,2,3,4]], index_col=0)
#units are in the second row drop them
df.drop(df.index[0], inplace=True)
#convert the dates to datetimes
df.index = pd.to_datetime(df.index,format="%Y %m %d %H %M")
#convert to floats
cols = ['PRES','PTIME','WSPD','WDIR','WTIME']
df[cols] = df[cols].astype(float)
return df
def swdir(self):
"""
Spectral wave data for alpha 1.
Returns
-------
specs : pandas dataframe
Index is the date and the columns are the spectrum. Values in
the table indicate how much energy is at each spectrum.
"""
link = "{}.{}".format(self.link, 'swdir')
#combine the first five date columns YY MM DD hh mm and make index
        df = pd.read_csv(link, delim_whitespace=True, skiprows=1, na_values=999,
                         header=None, parse_dates=[[0,1,2,3,4]], index_col=0)
"""
Функции и классы для проведения WoE-преобразований
"""
import math
import warnings
import numpy as np
import pandas as pd
import sklearn as sk
from IPython.display import display
from matplotlib import pyplot as plt
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.model_selection import train_test_split
from tqdm.auto import tqdm
class _GroupedPredictor(pd.DataFrame):
"""
Вспомогательный класс для удобства доступа к некоторым данным
"""
def get_predictor(self, x):
"""
Получение подвыборки по имени предиктора(ов)
Parameters
---------------
x : str/int/list-like
Предиктор или список предикторов
Returns:
-----------
self : pd.DataFrame
Часть датафрейма (самого себя)
"""
if isinstance(x, (list, set, tuple)):
return self[self["predictor"].isin(x)]
else:
return self[self["predictor"] == x]
def append(self, other):
return _GroupedPredictor(super().append(other))
class WoeTransformer(TransformerMixin, BaseEstimator):
"""Класс для построения и применения WOE группировки к датасету
Parameters
----------
min_sample_rate : float, default 0.05
Минимальный размер группы (доля от размера выборки)
min_count : int, default 3
Минимальное количество наблюдений каждого класса в группе
save_data : bool, default False
Параметр, определяющий, нужно ли сохранить данные для обучения
трансформера внутри экземпляра класса
join_bad_categories : bool, default False
Определяет, должени ли трансформер предпринять попытку для объединения
катогориальных групп в более крупные
Warning
-------
join_bad_categories - Экспериментальная функция.
Способ группировки категорий нестабилен
Attributes
----------
stats : pandas.DataFrame
Результаты WOE-группировки по всем предикторам
predictors : list
Список предикторов, на которых была построена группировка
cat_values : dict[str, list]
Словарь со списками категорий по предикторам, переданный при обучении
alpha_values : dict[str, float]
Словарь со значениями alpha для регуляризации групп
possible groups : pandas.DataFrame
Данные о значениях предиктора, которые могли бы стать
отдельными категориями
bad_groups : pandas.DataFrame
Данные о группах, которые не удовлетворяют условиям
"""
def __repr__(self):
return "WoeTransformer(min_sample_rate={!r}, min_count={!r}, n_fitted_predictors={!r})".format(
self.min_sample_rate,
self.min_count,
len(self.predictors),
)
def __init__(
self,
min_sample_rate: float = 0.05,
min_count: int = 3,
save_data: bool = False,
join_bad_categories: bool = False,
):
"""
Инициализация экземпляра класса
"""
self.min_sample_rate = min_sample_rate
self.min_count = min_count
self.predictors = []
self.alpha_values = {}
self.save_data = save_data
self.join_bad_categories = join_bad_categories
# -------------------------
# Функции интерфейса класса
# -------------------------
def fit(self, X, y, cat_values={}, alpha_values={}):
"""
Обучение трансформера и расчет всех промежуточных данных
Parameters
---------------
X : pd.DataFrame
Датафрейм с предикторами, которые нужно сгруппировать
y : pd.Series
Целевая переменная
cat_values : dict[str, list[str]], optional
Словарь списков с особыми значениями, которые нужно
выделить в категории
По умолчанию все строковые и пропущенные значения
выделяются в отдельные категории
alpha_values : dict[str, float], optional
Словарь со значениями alpha для регуляризации WOE-групп
Returns
-------
self : WoeTransformer
"""
# Сброс текущего состояния трансформера
self._reset_state()
# Сохранение категориальных значений
self.cat_values = cat_values
# Валидация данных и решейпинг
if hasattr(self, "_validate_data"):
X, y = self._validate_and_convert_data(X, y)
if self.save_data:
self.data = X
self.target = y
# Инициализация коэффициентов для регуляризации групп
self.alpha_values = {i: 0 for i in X.columns}
self.alpha_values.update(alpha_values)
# Агрегация значений предикторов
self._grouping(X, y)
# Расчет WOE и IV
self._fit_numeric(X, y)
# Поиск потенциальных групп
# Поиск "плохих" групп
self._get_bad_groups()
return self
def transform(self, X, y=None):
"""
Применение обученного трансформера к новым данным
Parameters
---------------
X : pandas.DataFrame
Датафрейм, который нужно преобразовать
Предикторы, которые не были сгруппированы ранее, будут
проигнорированы и выведется сообщение
y : pandas.Series
Игнорируется
Returns
-----------
transformed : pandas.DataFrame
Преобразованный датасет
"""
transformed = pd.DataFrame()
if hasattr(self, "_validate_data"):
try:
X, y = self._validate_and_convert_data(X, y)
except AttributeError:
pass
for i in X:
if i in self.predictors:
try:
transformed[i] = self._transform_single(X[i])
except Exception as e:
print(f"Transform failed on predictor: {i}", e)
else:
print(f"Column is not in fitted predictors list: {i}")
return transformed
def fit_transform(self, X, y, cat_values={}, alpha_values={}):
"""
Обучение трансформера и расчет всех промежуточных данных
с последующим примененим группировки к тем же данным
Parameters
---------------
X : pandas.DataFrame
Датафрейм с предикторами, которые нужно сгруппировать
y : pandas.Series
Целевая переменная
cat_values : dict[str, list[str]], optional
Словарь списков с особыми значениями, которые нужно
выделить в категории
По умолчанию все строковые и пропущенные значения
выделяются в отдельные категории
alpha_values : dict[str, float], optional
Словарь со значениями alpha для регуляризации WOE-групп
Returns
-----------
transformed : pd.DataFrame
Преобразованный датасет
"""
self.fit(X, y, cat_values=cat_values, alpha_values=alpha_values)
return self.transform(X)
def plot_woe(self, predictors=None):
"""
Отрисовка одного или нескольких графиков группировки
Parameters
---------------
predictors : str or array, default None
Предиктор(ы), по которым нужны графики
-- если str - отрисовывается один график
-- если array - отрисовываются графики из списка
-- если None - отрисовываются все сгруппированные предикторы
Warning
-------
Запуск метода без аргументов может занять длительное время при большом
количестве предикторов
"""
if predictors is None:
predictors = self.predictors
elif isinstance(predictors, str):
predictors = [predictors]
elif isinstance(predictors, (list, tuple, set)):
predictors = predictors
_, axes = plt.subplots(figsize=(10, len(predictors) * 5), nrows=len(predictors))
try:
for i, col in enumerate(predictors):
self._plot_single_woe_grouping(self.stats.get_predictor(col), axes[i])
except TypeError:
self._plot_single_woe_grouping(self.stats.get_predictor(col), axes)
# return fig
def get_iv(self, sort=False):
"""Получение списка значений IV по предикторам
Parameters
----------
sort : bool, default False
Включает сортировку результата по убыванию IV
Returns
-------
pandas.Series
"""
try:
res = self.stats.groupby("predictor")["IV"].sum()
if sort:
res = res.sort_values(ascending=False)
res = dict(res)
except AttributeError as e:
print(f"Transformer was not fitted yet. {e}")
res = {}
return res
# -------------------------
# Внутренние функции над всем датасетом
# -------------------------
def _validate_and_convert_data(self, X, y):
"""Проверяеn входные данные, трансформирует в объекты pandas
Использует метод _validate_data из sklearn/base.py
"""
if hasattr(X, "columns"):
predictors = X.columns
else:
predictors = ["X" + str(i + 1) for i in range(X.shape[1])]
if y is None:
X_valid = self._validate_data(X, y, dtype=None, force_all_finite=False)
X_valid = pd.DataFrame(X, columns=predictors)
y_valid = None
else:
X_valid, y_valid = self._validate_data(
X, y, dtype=None, force_all_finite=False
)
y_valid = pd.Series(y, name="target")
X_valid = pd.DataFrame(X, columns=predictors)
return X_valid, y_valid
def _grouping(self, X, y):
"""
Применение группировки ко всем предикторам
"""
df = X.copy()
df = df.fillna("пусто")
df["target"] = y.copy()
# Группировка и расчет показателей
for col in df.columns[:-1]:
grouped_temp = self._group_single(df[col], y)
num_mask = self._get_nums_mask(grouped_temp["value"])
cat_val_mask = grouped_temp["value"].isin(self.cat_values.get(col, []))
is_all_categorical = all(~num_mask | cat_val_mask)
if self.join_bad_categories and is_all_categorical:
repl = self._get_cat_values_for_join(grouped_temp)
grouped_temp = self._group_single(df[col].replace(repl), y)
self.grouped = self.grouped.append(grouped_temp)
# Замена пустых значений обратно на np.nan ИЛИ преобразование в числовой тип
try:
self.grouped["value"] = self.grouped["value"].replace({"пусто": np.nan})
except TypeError:
self.grouped["value"] = pd.to_numeric(
self.grouped["value"], downcast="signed"
)
def _fit_numeric(self, X, y):
"""
Расчет WOE и IV
Parameters:
---------------
X : pd.DataFrame
Датафрейм с предикторами, которые нужно сгруппировать
y : pd.Series
Целевая переменная
Returns
-------
None
"""
res = pd.DataFrame()
for i in X:
res_i = self._fit_single(X[i], y)
res = res.append(res_i)
self.predictors.append(i)
self.stats = self.stats.append(res)
# -------------------------
# Внутренние функции над отдельными столбцами
# -------------------------
def _group_single(self, x, y):
"""
Агрегация данных по значениям предиктора.
Рассчитывает количество наблюдений,
количество целевых событий, долю группы от общего числа наблюдений
и долю целевых в группе
Parameters:
---------------
X : pandas.DataFrame
Таблица данных для агрегации
y : pandas.Series
Целевая переменная
"""
col = x.name
df = pd.DataFrame({col: x.values, "target": y.values})
grouped_temp = df.groupby(col)["target"].agg(["count", "sum"]).reset_index()
grouped_temp.columns = ["value", "sample_count", "target_count"]
grouped_temp["sample_rate"] = (
grouped_temp["sample_count"] / grouped_temp["sample_count"].sum()
)
grouped_temp["target_rate"] = (
grouped_temp["target_count"] / grouped_temp["sample_count"]
)
grouped_temp.insert(0, "predictor", col)
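        # For example (illustrative): x = ["A", "A", "B"] with y = [1, 0, 0]
        # yields ("A", sample_count=2, target_count=1, sample_rate~0.67,
        # target_rate=0.5) and ("B", 1, 0, ~0.33, 0.0).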
return _GroupedPredictor(grouped_temp)
def _fit_single(self, x, y, gr_subset=None, cat_vals=None):
"""
Расчет WOE и IV
Parameters:
---------------
X : pd.DataFrame
Датафрейм с предикторами, которые нужно сгруппировать
y : pd.Series
Целевая переменная
gr_subset : _GroupedPredictor
Предиктор
"""
gr_subset_num = pd.DataFrame()
gr_subset_cat = pd.DataFrame()
col = x.name
if gr_subset is None:
gr_subset = self.grouped.get_predictor(col)
if cat_vals is None:
cat_vals = self.cat_values.get(col, [])
nan_mask = x.isna()
num_mask = self._get_nums_mask(x) & (~x.isin(cat_vals)) & (~nan_mask)
num_vals = x.loc[num_mask].unique()
try:
# Расчет коэффициентов тренда по числовым значениям предиктора
if num_mask.sum() > 0:
try:
poly_coefs = np.polyfit(
x.loc[num_mask].astype(float), y.loc[num_mask], deg=1
)
except np.linalg.LinAlgError as e:
print(f"Error in np.polyfit on predictor: '{col}'.\nError MSG: {e}")
print("Linear Least Squares coefficients were set to [1, 0]")
poly_coefs = np.array([1, 0])
self.trend_coefs.update({col: poly_coefs})
# Расчет монотонных границ
gr_subset_num = gr_subset[gr_subset["value"].isin(num_vals)].copy()
gr_subset_num["value"] = pd.to_numeric(gr_subset_num["value"])
gr_subset_num = gr_subset_num.sort_values("value")
borders = self._monotonic_borders(gr_subset_num, self.trend_coefs[col])
self.borders.update({col: borders})
# Применение границ к сгруппированным данным
gr_subset_num["groups"] = pd.cut(gr_subset_num["value"], borders)
gr_subset_num["type"] = "num"
except ValueError as e:
print(f"ValueError on predictor {col}.\nError MSG: {e}")
# Расчет коэффициентов тренда по категориальным значениям предиктора
if (~num_mask).sum() > 0:
gr_subset_cat = gr_subset[~gr_subset["value"].isin(num_vals)].copy()
gr_subset_cat["groups"] = gr_subset_cat["value"].fillna("пусто")
gr_subset_cat["type"] = "cat"
# Объединение числовых и категориальных значений
gr_subset = pd.concat([gr_subset_num, gr_subset_cat], axis=0, ignore_index=True)
# Расчет WOE и IV
alpha = self.alpha_values.get(col, 0)
res_i = self._statistic(gr_subset, alpha=alpha)
is_empty_exists = any(res_i["groups"].astype(str).str.contains("пусто"))
if is_empty_exists:
res_i["groups"].replace({"пусто": np.nan}, inplace=True)
return res_i
def _transform_single(self, x, stats=None):
"""
Применение группировки и WoE-преобразования
Parameters
---------------
x : pandas.Series
Значения предиктора
Returns
---------------
X_woe : pandas.DataFrame
WoE-преобразования значений предиктора
WoE = 0, если группа не встречалась в обучающей выборке
"""
orig_index = x.index
X_woe = x.copy()
if stats is None:
stats = self.stats.get_predictor(X_woe.name)
# Маппинги для замены групп на соответствующие значения WOE
num_map = {
stats.loc[i, "groups"]: stats.loc[i, "WOE"]
for i in stats.index
if stats.loc[i, "type"] == "num"
}
cat_map = {
stats.loc[i, "groups"]: stats.loc[i, "WOE"]
for i in stats.index
if stats.loc[i, "type"] == "cat"
}
# Категориальные группы
cat_bounds = stats.loc[stats["type"] == "cat", "groups"]
# predict по числовым значениям
DF_num = stats.loc[stats["type"] == "num"]
if DF_num.shape[0] > 0:
# Границы (правые) интервалов для разбивки числовых переменных
num_bounds = [-np.inf] + list(
pd.IntervalIndex(stats.loc[stats["type"] == "num", "groups"]).right
)
# Выделение только числовых значений предиктора
# (похожих на числа и тех, что явно не указаны как категориальные)
X_woe_num = pd.to_numeric(
X_woe[(self._get_nums_mask(X_woe)) & (~X_woe.isin(cat_bounds))]
)
# Разбивка значений на интервалы в соответствии с группировкой
X_woe_num = pd.cut(X_woe_num, num_bounds)
# Замена групп на значения WOE
X_woe_num = X_woe_num.replace(num_map)
X_woe_num.name = "woe"
else:
X_woe_num = pd.Series()
# predict по категориальным значениям (может обновлять значения по числовым)
DF_cat = stats.loc[stats["type"] == "cat"]
if DF_cat.shape[0] > 0:
# Выделение строковых значений и тех, что явно выделены как категориальные
X_woe_cat = X_woe[X_woe.isin(cat_map.keys())]
# Замена групп на значения WOE
X_woe_cat = X_woe_cat.replace(cat_map)
else:
X_woe_cat = pd.Series()
        # predict по новым категориям (нечисловые: которых не было при группировке)
# Сбор индексов категориальных и числовых значений
used_index = np.hstack([X_woe_cat.index, X_woe_num.index])
if len(used_index) < len(x):
X_woe_oth = X_woe.index.drop(used_index)
X_woe_oth = pd.Series(0, index=X_woe_oth)
else:
X_woe_oth = pd.Series()
X_woe = pd.concat([X_woe_num, X_woe_cat, X_woe_oth]).reindex(orig_index)
X_woe = pd.to_numeric(X_woe, downcast="signed")
return X_woe
def _monotonic_borders(self, grouped, p):
"""
Определение оптимальных границ групп предиктора (монотонный тренд)
Parameters
---------------
DF_grouping : pandas.DataFrame
Агрегированные данные по значениям предиктора (результат работы
            функции grouping, очищенный от категориальных значений).
Должен содержать поля 'predictor', 'sample_count', 'target_count',
'sample_rate и 'target_rate'
p : list-like, длиной в 2 элемента
Коэффициенты линейного тренда значений предиктора
Returns
---------------
R_borders : list
Правые границы групп для последующей группировки
"""
k01, k11 = (1, 1) if p[0] > 0 else (0, -1)
R_borders = []
min_ind = 0 # минимальный индекс. Начальные условия
DF_grouping = grouped.copy().sort_values("value").reset_index()
while min_ind < DF_grouping.shape[0]: # цикл по новым группам
# Расчет показателей накопительным итогом
DF_j = DF_grouping.iloc[min_ind:]
DF_iter = DF_j[["sample_rate", "sample_count", "target_count"]].cumsum()
DF_iter["non_target_count"] = (
DF_iter["sample_count"] - DF_iter["target_count"]
)
DF_iter["target_rate"] = DF_iter["target_count"] / DF_iter["sample_count"]
# Проверка на соответствие критериям групп
DF_iter["check"] = self._check_groups(DF_iter)
# Расчет базы для проверки оптимальности границы
# В зависимости от тренда считается скользящий _вперед_ минимум или максимум
# (в расчете участвуют все наблюдения от текущего до последнего)
if k11 == 1:
DF_iter["pd_gr"] = (
DF_iter["target_rate"][::-1]
.rolling(len(DF_iter), min_periods=0)
.min()[::-1]
)
else:
DF_iter["pd_gr"] = (
DF_iter["target_rate"][::-1]
.rolling(len(DF_iter), min_periods=0)
.max()[::-1]
)
# Проверка оптимальности границы
DF_iter["opt"] = DF_iter["target_rate"] == DF_iter["pd_gr"]
DF_iter = pd.concat([DF_j[["value"]], DF_iter], axis=1)
try:
min_ind = DF_iter.loc[
(DF_iter["check"]) & (DF_iter["opt"]), "target_rate"
].index.values[0]
score_j = DF_iter.loc[min_ind, "value"]
if (
len(R_borders) > 0 and score_j == R_borders[-1]
): # Выход из цикла, если нет оптимальных границ
break
except Exception:
break
min_ind += 1
R_borders.append(score_j)
# Проверка последней добавленной группы
if len(R_borders) > 0:
DF_iter = DF_grouping.loc[DF_grouping["value"] > R_borders[-1]]
sample_rate_i = DF_iter["sample_rate"].sum() # доля выборки
sample_count_i = DF_iter["sample_count"].sum() # количество наблюдений
target_count_i = DF_iter["target_count"].sum() # количество целевых
non_target_count_i = sample_count_i - target_count_i # количество нецелевых
if (
(sample_rate_i < self.min_sample_rate)
or (target_count_i < self.min_count)
or (non_target_count_i < self.min_count)
):
R_borders.remove(R_borders[-1]) # удаление последней границы
else:
predictor = DF_grouping["predictor"].iloc[0]
warnings.warn(
f"Couldn't find any borders for feature {predictor}.\n Borders set on (-inf, +inf)"
)
R_borders = [-np.inf] + R_borders + [np.inf]
return R_borders
def _check_groups(
self,
df,
sample_rate_col="sample_rate",
sample_count_col="sample_count",
target_count_col="target_count",
):
""" Проверить сгруппированные значения предиктора на соответствме условиям"""
cond_mask = (
(df[sample_rate_col] >= self.min_sample_rate - 10 ** -9)
& (df[sample_count_col] >= self.min_count)
& (df[target_count_col] >= self.min_count)
)
return cond_mask
def _get_cat_values_for_join(self, grouped):
"""Получить словарь для замены категорий на объединяемые
NOTE: Нужно тестирование
TODO: переписать на рекурсию
"""
df = grouped.copy()
cond_mask = ~self._check_groups(df)
res = df[
[
"predictor",
"value",
"sample_count",
"target_count",
"sample_rate",
"target_rate",
]
].copy()
res = res.sort_values(["sample_rate", "target_rate"])
res["cum_sample_rate"] = res["sample_rate"].cumsum()
res["check"] = cond_mask
res["check_reverse"] = ~cond_mask
res["check_diff"] = res["check"].astype(int).diff()
res["new_group"] = (res["check_diff"] == -1).astype(int)
res["exist_group"] = res["check_reverse"].astype(int).eq(1)
res.loc[~res["check_reverse"], "exist_group"] = np.NaN
res["exist_group_cum"] = res["exist_group"].cumsum().fillna(method="bfill")
res[["cum_sr", "cum_sc", "cum_tc"]] = res.groupby("exist_group_cum").agg(
{
"sample_rate": "cumsum",
"sample_count": "cumsum",
"target_count": "cumsum",
}
)
res["cum_sr_check"] = (
self._check_groups(res, "cum_sr", "cum_sc", "cum_tc")
.astype(int)
.diff()
.eq(1)
.astype(int)
.shift()
)
display(res)
res.loc[res["cum_sr_check"] != 1, "cum_sr_check"] = np.nan
res["cum_sr_check"] = res["cum_sr_check"].fillna(method="ffill").fillna(0)
res["group_number"] = res["exist_group_cum"] + res["cum_sr_check"]
repl = res.groupby("group_number").agg({"value": list}).to_dict()["value"]
repl = {k: "_".join(v) for k, v in repl.items()}
res["group_vals"] = res["group_number"].replace(repl)
t = dict(zip(res["value"], res["group_vals"]))
return t
def _plot_single_woe_grouping(self, stats, ax_pd=None):
"""
Построение графика по группировке предиктора
Parameters
---------------
stats : pandas.DataFrame
Статистика по каждой группе (результат работы функции statistic):
минимальное, максимальное значение, доля от общего объема выборки,
количество и доля целевых и нецелевых событий в каждой группе,
WOE и IV каждой группы
Должен содержать столбцы: 'sample_rate', 'target_rate', 'WOE'
ax_pd : matplotlib.Axes
Набор осей (subplot)
"""
# Расчеты
x2 = [stats["sample_rate"][:i].sum() for i in range(stats.shape[0])] + [
1
] # доля выборки с накоплением
x = [
np.mean(x2[i : i + 2]) for i in range(len(x2) - 1)
] # средняя точка в группах
# Выделение нужной информации для компактности
woe = list(stats["WOE"])
height = list(stats["target_rate"]) # проблемность в группе
width = list(stats["sample_rate"]) # доля выборки на группу
# Визуализация
if ax_pd is None:
_, ax_pd = plt.subplots(figsize=(8, 5))
# Столбчатая диаграмма доли целевых в группах
ax_pd.bar(
x=x,
height=height,
width=width,
color=[0, 122 / 255, 123 / 255],
label="Группировка",
alpha=0.7,
)
# График значений WOE по группам
ax_woe = ax_pd.twinx() # дубликат осей координат
ax_woe.plot(
x, woe, lw=2, color=[37 / 255, 40 / 255, 43 / 255], label="woe", marker="o"
)
# Линия нулевого значения WOE
ax_woe.plot(
[0, 1], [0, 0], lw=1, color=[37 / 255, 40 / 255, 43 / 255], linestyle="--"
)
# Настройка осей координат
plt.xlim([0, 1])
plt.xticks(x2, [round(i, 2) for i in x2], fontsize=12)
ax_pd.grid(True)
ax_pd.set_xlabel("Доля выборки", fontsize=16)
ax_pd.set_ylabel("pd", fontsize=16)
ax_woe.set_ylabel("woe", fontsize=16)
# Расчет границ графика и шага сетки
max_woe = max([int(abs(i)) + 1 for i in woe])
max_pd = max([int(i * 10) + 1 for i in height]) / 10
# Границы и сетка для столбчатой диаграммы
ax_pd.set_ylim([0, max_pd])
ax_pd.set_yticks([round(i, 2) for i in np.linspace(0, max_pd, 11)])
ax_pd.legend(bbox_to_anchor=(1.05, 0.83), loc=[0.2, -0.25], fontsize=14)
# Границы и сетка для графика WOE
ax_woe.set_ylim([-max_woe, max_woe])
ax_woe.set_yticks([round(i, 2) for i in np.linspace(-max_woe, max_woe, 11)])
ax_woe.legend(bbox_to_anchor=(1.05, 0.92), loc=[0.6, -0.25], fontsize=14)
plt.title(
"Группировка предиктора {}".format(stats.loc[0, "predictor"]), fontsize=18
)
# Для категориальных
n_cat = stats.loc[stats["type"] == "cat"].shape[0]
if n_cat > 0:
ax_pd.bar(
x=x[-n_cat:],
height=height[-n_cat:],
width=width[-n_cat:],
color="m",
label="Категориальные",
)
ax_pd.legend(bbox_to_anchor=(1.05, 0.76), loc=[0.15, -0.33], fontsize=14)
plt.tight_layout()
def _get_possible_groups(self):
"""
Поиск возможных групп в значениях предикторов после агрегации
"""
self.possible_groups = pd.DataFrame()
# Выделение значений предиктора с достаточным кол-вом наблюдений и
# не отмеченных, как категориальные
for i in self.predictors:
cat_vals = self.cat_values.get(i, [])
DF_i1 = self.grouped.get_predictor(i).copy()
DF_i1 = DF_i1.loc[
(DF_i1["sample_rate"] > self.min_sample_rate)
& (~DF_i1["value"].isin(cat_vals))
]
# Выделение всех значений предиктора, не отмеченных, как категориальные
DF_i2 = self.grouped.get_predictor(i).copy()
DF_i2 = DF_i2.loc[(~DF_i2["value"].isin(cat_vals))]
            # Выбор значений: которые не равны бесконечности и при этом не являются числами
L = ~(DF_i2["value"] == np.inf) & (~(self._get_nums_mask(DF_i2["value"])))
DF_i2 = DF_i2.loc[L]
# Объединение найденных значений в одну таблицу
DF_i = pd.concat((DF_i1, DF_i2), ignore_index=True).drop_duplicates()
self.possible_groups = self.possible_groups.append(DF_i)
def _get_bad_groups(self):
"""
Поиск групп: не удовлетворяющих условиям
"""
self.bad_groups = self.stats.loc[
(self.stats["sample_rate"] < self.min_sample_rate)
| (self.stats["target_count"] < self.min_count)
| (self.stats["sample_count"] - self.stats["target_count"] < self.min_count)
]
def _regularize_groups(self, stats, alpha=0):
"""расчет оптимальной целевой для группы на основе готовой woe-группировки
формула и детали в видео
https://www.youtube.com/watch?v=g335THJxkto&list=PLLIunAIxCvT8ZYpC6-X7H0QfAQO9H0f-8&index=12&t=0s
pd = (y_local * K + Y_global * alpha) / (K + alpha)"""
Y_global = stats["target_count"].sum() / stats["sample_count"].sum()
K = stats["sample_count"] / stats["sample_count"].sum()
stats["target_rate"] = (stats["target_rate"] * K + Y_global * alpha) / (
K + alpha
)
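        # Worked micro-example (illustrative numbers): with Y_global = 0.10, a
        # group holding 5% of the sample (K = 0.05) with target_rate = 0.50 and
        # alpha = 0.05 becomes (0.50*0.05 + 0.10*0.05) / (0.05 + 0.05) = 0.30,
        # i.e. it is pulled halfway toward the global rate.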
stats["target_count"] = np.floor(
stats["sample_count"] * stats["target_rate"]
).astype(int)
return stats
def _statistic(self, grouped, alpha=0):
"""
Расчет статистики по группам предиктора: минимальное, максимальное значение, доля от
общего объема выборки, количество и доля целевых и нецелевых событий в каждой группе
А также расчет WOE и IV каждой группы
Parameters
---------------
grouped : pandas.DataFrame
Данные полученных групп предиктора. Кол-во строк совпадает с кол-вом
уникальных значений предиктора.
Должен содержать столбцы: 'sample_count', 'target_count', 'groups'
alpha : float, default 0
Коэффициент регуляризации групп
Returns
---------------
stats : pandas.DataFrame
Агрегированные данные по каждой группе
"""
nothing = 10 ** -6
stats = grouped.groupby(["predictor", "groups"], sort=False).agg(
{
"type": "first",
"sample_count": "sum",
"target_count": "sum",
"value": ["min", "max"],
},
)
stats.columns = ["type", "sample_count", "target_count", "min", "max"]
stats.reset_index(inplace=True)
stats["sample_rate"] = stats["sample_count"] / stats["sample_count"].sum()
stats["target_rate"] = stats["target_count"] / stats["sample_count"]
stats = self._regularize_groups(stats, alpha=alpha)
# Расчет WoE и IV
samples_num = stats["sample_count"].sum()
events = stats["target_count"].sum()
non_events = samples_num - events
stats["non_events_i"] = stats["sample_count"] - stats["target_count"]
stats["event_rate_i"] = stats["target_count"] / (events + nothing)
stats["non_event_rate_i"] = stats["non_events_i"] / (non_events + nothing)
stats["WOE"] = np.log(
stats["non_event_rate_i"] / (stats["event_rate_i"] + nothing) + nothing
)
stats["IV"] = stats["WOE"] * (stats["non_event_rate_i"] - stats["event_rate_i"])
return stats
def _calc_trend_coefs(self, x, y):
"""
Расчет коэффициентов тренда
Parameters
---------------
x : pandas.Series
Значения предиктора
y : pandas.Series
Целевая переменная
Returns
-----------
dict[str, tuple[float, float]]
"""
return {x.name: np.polyfit(x, y, deg=1)}
# Служебные функции
def _reset_state(self):
self.trend_coefs = {}
self.borders = {}
self.cat_values = {}
self.predictors = []
self.grouped = _GroupedPredictor()
self.stats = _GroupedPredictor()
def _get_nums_mask(self, x):
# if x.apply(lambda x: isinstance(x, str)).sum() == len(x):
# return pd.Series(False, index=x.index)
# else:
# mask = pd.to_numeric(x, errors="coerce").notna()
mask = pd.to_numeric(x, errors="coerce").notna()
return mask
class WoeTransformerRegularized(WoeTransformer):
"""
Класс для построения и применения WOE группировки к датасету с применением
регуляризации малых групп
"""
def __init__(self, min_sample_rate=0.05, min_count=3, alphas=None, n_seeds=30):
"""
Инициализация экземпляра класса
"""
self.min_sample_rate = min_sample_rate
self.min_count = min_count
self.predictors = []
        # alphas must be iterable; keep the original default value in a list
        self.alphas = [100] if alphas is None else alphas
self.alpha_values = {}
self.n_seeds = n_seeds
def fit(self, X, y, cat_values={}, alpha_values={}):
"""
Обучение трансформера и расчет всех промежуточных данных
Parameters
---------------
X : pd.DataFrame
Датафрейм с предикторами, которые нужно сгруппировать
y : pd.Series
Целевая переменная
cat_values : dict[str, list[str]], optional
Словарь списков с особыми значениями, которые нужно
выделить в категории
По умолчанию все строковые и пропущенные значения
выделяются в отдельные категории
alpha_values : dict[str, float], optional
Словарь со значениями alpha для регуляризации WOE-групп
Returns
-------
self : WoeTransformer
"""
# Сброс текущего состояния трансформера
self._reset_state()
self.cat_values = cat_values
self.regularization_stats = _GroupedPredictor()
for col in tqdm(X.columns, desc="Searching alphas"):
temp_alpha = self._cat_features_alpha_logloss(
X[col], y, self.alphas, self.n_seeds
)
self.alpha_values.update({col: temp_alpha})
self._grouping(X, y)
# Расчет WOE и IV
self._fit_numeric(X, y)
# Поиск потенциальных групп
# Поиск "плохих" групп
self._get_bad_groups()
return self
def _cat_features_alpha_logloss(self, x, y, alphas, seed=30):
"""
функция расчета IV, GINI и logloss для категориальных
переменных с корректировкой целевой по alpha
"""
# задаем промежуточную функцию для WOE преобразования переменной из исходного датафрейма
# по рассчитанным WOE из IVWOE
def calc_woe_i(row_value, stats):
return stats.loc[stats["groups"] == row_value, "WOE"].values[0]
predictor = x.name
target = y.name
df = pd.DataFrame({predictor: x.values, target: y.values})
df[predictor] = df[predictor].fillna("NO_INFO")
L_logloss_mean = []
GINI_IV_mean = []
for alpha_i in alphas:
logloss_i = []
GINI_i = []
IV_i = []
for seed_i in range(seed):
X_train, X_test, y_train, y_test = train_test_split(
x, y, test_size=0.3, random_state=seed_i, stratify=y
)
# Группировка значений предиктора с текущим alpha
df_i = self._group_single(X_train, y_train)
df_i["groups"] = df_i["value"].fillna("пусто")
df_i["type"] = "cat"
# Обучение и применение группировки к обучающему набору
WOE_i = self._fit_single(X_train, y_train, df_i)
WOE_i = self._regularize_groups(WOE_i, alpha_i)
# расчет оптимальной целевой для группы, формула и детали в видео
# https://www.youtube.com/watch?v=g335THJxkto&list=PLLIunAIxCvT8ZYpC6-X7H0QfAQO9H0f-8&index=12&t=0s
# pd = (y_local * K + Y_global * alpha) / (K + alpha)
Y_global = y_train.mean()
K = WOE_i["sample_count"] / WOE_i["sample_count"].sum()
WOE_i["target_rate"] = (
WOE_i["target_rate"] * K + Y_global * alpha_i
) / (K + alpha_i)
WOE_i["target_count"] = np.floor(
WOE_i["sample_count"] * WOE_i["target_rate"]
).astype(int)
X_test_WOE = self._transform_single(X_test, WOE_i)
roc_auc_i = sk.metrics.roc_auc_score(y_test, X_test_WOE)
# Подстановка регуляризованной доли целевой вместо каждой группы
target_transformed = X_test_WOE.replace(
dict(zip(WOE_i["WOE"], WOE_i["target_rate"]))
)
# Запись значений
logloss_i.append(
sk.metrics.log_loss(y_test, target_transformed.fillna(0))
)
IV_i.append(WOE_i["IV"].sum())
GINI_i.append(abs(2 * roc_auc_i - 1))
# Запись средних значений
L_logloss_mean.append([alpha_i, np.mean(logloss_i)])
GINI_IV_mean.append([alpha_i, np.mean(GINI_i), np.mean(IV_i)])
alpha_GINI_IV = pd.DataFrame(GINI_IV_mean, columns=["alpha", "GINI", "IV"])
alpha_GINI_IV.insert(0, "predictor", predictor)
self.regularization_stats = self.regularization_stats.append(alpha_GINI_IV)
# Индекс значения alpha с наименьшим логлоссом
min_logloss_ind = np.argmin(L_logloss_mean, axis=0)[1]
alpha_opt = L_logloss_mean[min_logloss_ind][0]
return alpha_opt
########################
# Комплект ускоренных версий функции #
########################
# Сильно отстал от класса, но в точности повторяет функциональность Vanilla
def grouping(DF_data_i, low_acc=False):
"""
Агрегация данных по значениям предиктора. Рассчитывает количество наблюдений,
количество целевых событий, долю группы от общего числа наблюдений и долю целевых в группе
Parameters
---------------
DF_data_i : pandas.DataFrame
Таблица данных для агрегации, должна содержать поля 'predictor' и 'target'.
Поле target при этом должно состоять из 0 и 1, где 1 - целевое событие
low_acc : int, default None
Параметр для округления значений предиктора.
Если None, то предиктор не округляется.
Если целое неотрицательное число, параметр используется для определения
количества знаков после запятой, остальные значения игнорируются
Returns
---------------
DF_grouping : pandas.DataFrame
Таблица с агрегированными данными по значениям предиктора
"""
    # Округление, если аргумент принимает допустимые значения
if low_acc and type(low_acc) is int and low_acc > 0:
DF_data_i = DF_data_i[["predictor", "target"]].round(low_acc)
# Группировка и расчет показателей
DF_grouping = (
DF_data_i.groupby("predictor")["target"].agg(["count", "sum"]).reset_index()
)
DF_grouping.columns = ["predictor", "sample_count", "target_count"]
DF_grouping["sample_rate"] = (
DF_grouping["sample_count"] / DF_grouping["sample_count"].sum()
)
DF_grouping["target_rate"] = (
DF_grouping["target_count"] / DF_grouping["sample_count"]
)
return DF_grouping
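# Usage sketch for grouping() (illustrative; the frame must expose 'predictor'
# and 'target' columns):
#   df = pd.DataFrame({"predictor": [1, 1, 2], "target": [0, 1, 0]})
#   grouping(df)  # -> one row per predictor value with counts, shares and rates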
def monotonic_borders(DF_grouping, p, min_sample_rate=0.05, min_count=3):
"""
Определение оптимальных границ групп предиктора (монотонный тренд)
Parameters
---------------
DF_grouping : pandas.DataFrame
Агрегированные данные по значениям предиктора (результат работы
        функции grouping, очищенный от категориальных значений).
Должен содержать поля 'predictor', 'sample_count', 'target_count',
'sample_rate и 'target_rate'
p : list-like, длиной в 2 элемента
Коэффициенты линейного тренда значений предиктора
min_sample_rate : float, default 0.05
Минимальный размер группы (доля от размера выборки)
min_count : int, default 3
Минимальное количество наблюдений каждого класса в группе
Returns
---------------
R_borders : list
Правые границы групп для последующей группировки
"""
k01, k11 = (1, 1) if p[0] > 0 else (0, -1)
R_borders = []
min_ind = 0 # минимальный индекс. Начальные условия
while min_ind < DF_grouping.shape[0]: # цикл по новым группам
# Расчет показателей накопительным итогом
DF_j = DF_grouping.loc[min_ind:]
DF_iter = DF_j[["sample_rate", "sample_count", "target_count"]].cumsum()
DF_iter["non_target_count"] = DF_iter["sample_count"] - DF_iter["target_count"]
DF_iter["target_rate"] = DF_iter["target_count"] / DF_iter["sample_count"]
# Проверка на соответствие критериям групп
DF_iter["check"] = (
(DF_iter["sample_rate"] >= min_sample_rate - 10 ** -9)
& (DF_iter["target_count"] >= min_count)
& (DF_iter["non_target_count"] >= min_count)
)
# Расчет базы для проверки оптимальности границы
# В зависимости от тренда считается скользящий _вперед_ минимум или максимум
# (в расчете участвуют все наблюдения от текущего до последнего)
if k11 == 1:
DF_iter["pd_gr"] = (
DF_iter["target_rate"][::-1]
.rolling(len(DF_iter), min_periods=0)
.min()[::-1]
)
else:
DF_iter["pd_gr"] = (
DF_iter["target_rate"][::-1]
.rolling(len(DF_iter), min_periods=0)
.max()[::-1]
)
# Проверка оптимальности границы
DF_iter["opt"] = DF_iter["target_rate"] == DF_iter["pd_gr"]
        DF_iter = pd.concat([DF_j[["predictor"]], DF_iter], axis=1)
from typing import Type
import pytest
from pandas import Categorical, Series
from pandas.testing import assert_frame_equal
from datar.core.grouped import *
from datar.base import NA, mean, as_character
from datar import f, get_versions
from pipda.function import FastEvalFunction
def test_instance():
df = DataFrame({"a": [1, 2, 3]})
gf = DataFrameGroupBy({"a": [1, 2, 3]}, _group_vars=["a"])
assert isinstance(gf, DataFrameGroupBy)
assert gf.equals(df)
assert gf.attrs["_group_drop"]
gf = DataFrameGroupBy(
{"a": [1, 2, 3]}, _group_vars=["a"], _group_drop=False
)
assert not gf.attrs["_group_drop"]
gf = DataFrameGroupBy(df, _group_vars=["a"])
assert gf.equals(df)
assert gf.attrs["_group_drop"]
# group_by() nothing should be implemented in dplyr.group_by()
# because we have to make sure it's a DataFrame, instead of a
# DataFrameGroupBy object
#
# df.attrs['a'] = 1
# gf = DataFrameGroupBy(df)
# assert gf.equals(df)
# assert 'a' not in gf.attrs
# assert gf._group_vars == []
def test_group_data():
df = DataFrame({"a": [1, 2, 3]})
gf = DataFrameGroupBy(df, _group_vars="a")
exp = DataFrame(
[
[1, [0]],
[2, [1]],
[3, [2]],
],
columns=["a", "_rows"],
)
assert gf._group_data.equals(exp)
# gf = DataFrameGroupBy(df)
# assert gf._group_data.equals(DataFrame({"_rows": [[0, 1, 2]]}))
# pandas bug: https://github.com/pandas-dev/pandas/issues/35202
# df = DataFrame({"a": [1, 2, 3, NA, NA]})
# df = DataFrame({"a": [1, 2, 3]})
gf = DataFrameGroupBy(df, _group_vars=["a"])
exp = DataFrame(
# [[1, [0]], [2, [1]], [3, [2]], [NA, [3, 4]]], columns=["a", "_rows"]
[[1, [0]], [2, [1]], [3, [2]]],
columns=["a", "_rows"],
)
assert_frame_equal(gf._group_data, exp)
def test_group_data_cat():
df = DataFrame({"a": Categorical([1, NA, 2], categories=[1, 2, 3])})
gf = DataFrameGroupBy(df, _group_vars=["a"])
exp = DataFrame(
[
[1, [0]],
[2, [2]],
# [NA, [1]],
],
columns=["a", "_rows"],
)
# categorical kept
exp["a"] = Categorical(exp["a"], categories=[1, 2, 3])
assert_frame_equal(gf._group_data, exp)
gf = DataFrameGroupBy(df, _group_vars=["a"], _group_drop=False)
exp = DataFrame(
[
[1, [0]],
[2, [2]],
[3, []],
# [NA, [1]],
],
columns=["a", "_rows"],
)
exp["a"] = Categorical(exp["a"], categories=[1, 2, 3])
assert gf._group_data.equals(exp)
def test_multi_cats():
df = DataFrame(
{
"a": Categorical([1, NA, NA, NA], categories=[1, 2]),
"b": Categorical([2, 2, 2, 3], categories=[2, 3]),
"c": Categorical([3, NA, NA, NA], categories=[3]),
"d": Categorical([4, NA, NA, NA], categories=[4]),
}
)
gf = DataFrameGroupBy(df, _group_vars=list("abcd"), _group_drop=False)
# assert len(gf._group_data) == 11
# It's 4 with pandas 1.2
# But 3 with pandas 1.3
if (
get_versions(False).pandas < "1.3.0"
): # note when it comes to '1.11.x' vs '1.2.x'
assert len(gf._group_data) == 4
else:
assert len(gf._group_data) == 3
gf = DataFrameGroupBy(df, _group_vars=list("abcd"), _group_drop=True)
if (
get_versions(False).pandas < "1.3.0"
): # note when it comes to '1.11.x' vs '1.2.x'
assert len(gf._group_data) == 4
else:
assert len(gf._group_data) == 3
def test_0row_df():
df = DataFrame({"a": [], "b": []})
gf = DataFrameGroupBy(df, _group_vars=["a"])
assert gf._group_data.shape == (0, 2)
assert gf._group_data.columns.tolist() == ["a", "_rows"]
df = DataFrame({"a": Categorical([], categories=[1, 2]), "b": []})
gf = DataFrameGroupBy(df, _group_vars=["a"])
assert gf._group_data.shape == (0, 2)
assert gf._group_data.columns.tolist() == ["a", "_rows"]
gf = DataFrameGroupBy(df, _group_vars=["a"], _group_drop=False)
assert gf._group_data.shape == (2, 2)
def test_apply():
df = DataFrame({"a": Categorical([1, 2], categories=[1, 2, 3])})
def n(subdf, skip=None):
if subdf.attrs["_group_index"] == skip:
return None
return DataFrame(
{
"n": [
len(
subdf.attrs["_group_data"].loc[
subdf.attrs["_group_index"], "_rows"
]
)
]
}
)
gf = DataFrameGroupBy(df, _group_vars=["a"])
out = gf._datar_apply(n)
# 3 lost
exp = DataFrame({"a": Categorical(df.a, categories=[1, 2]), "n": [1, 1]})
assert_frame_equal(out, exp)
gf = DataFrameGroupBy(df, _group_vars=["a"], _group_drop=False)
out = gf._datar_apply(n)
exp = DataFrame(
{"a": Categorical([1, 2, 3], categories=[1, 2, 3]), "n": [1, 1, 0]}
)
assert_frame_equal(out, exp)
out = gf._datar_apply(n, skip=0)
exp = DataFrame(
{"a": Categorical([2, 3], categories=[1, 2, 3]), "n": [1, 0]}
)
assert_frame_equal(out, exp)
def test_agg():
df = DataFrame(dict(a=[1, 1, 2, 2], b=[1, 2, 3, 4]))
gf = DataFrameGroupBy(df, _group_vars="a")
out = gf._datar_apply(None, _mappings=dict(c=f.b.mean()), _method="agg")
assert_frame_equal(out, DataFrame(dict(a=[1, 2], c=[1.5, 3.5])))
# numpy functions
out = gf._datar_apply(
None,
_mappings=dict(
c=FastEvalFunction(
mean, args=(f.b,), kwargs={"na_rm": True}, dataarg=False
)
),
_method="agg",
)
assert_frame_equal(out, DataFrame(dict(a=[1, 2], c=[1.5, 3.5])))
# numpy functions with na_rm
out = gf._datar_apply(
None,
_mappings=dict(
c=FastEvalFunction(
mean, args=(f.b,), kwargs={"na_rm": True}, dataarg=False
)
),
_method="agg",
)
assert_frame_equal(out, DataFrame(dict(a=[1, 2], c=[1.5, 3.5])))
# fail
with pytest.raises(TypeError, match="not callable"):
gf._datar_apply(
None,
_mappings=dict(
c=FastEvalFunction(
as_character, args=(f.b,), kwargs={}, dataarg=False
)
),
_method="agg",
)
with pytest.raises(TypeError, match="not callable"):
gf._datar_apply(None, _mappings=dict(c=1), _method="agg")
# no groupdata
out = gf._datar_apply(
None, _mappings=dict(c=f.b.mean()), _method="agg", _groupdata=False
)
assert_frame_equal(out, DataFrame(dict(c=[1.5, 3.5])))
# drop index
out = gf._datar_apply(
None, _mappings=dict(c=f.b.cummax()), _method="agg", _groupdata=False
)
assert_frame_equal(out, DataFrame(dict(c=[1, 2, 3, 4])))
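    # Plain-pandas sketch of the aggregations this test mirrors (illustrative):
    #   df.groupby("a")["b"].mean()    # -> 1.5 for a == 1 and 3.5 for a == 2
    #   df.groupby("a")["b"].cummax()  # -> [1, 2, 3, 4]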
# def test_construct_with_give_groupdata():
# df = DataFrame({"a": [1, 2, 3]})
# gf = DataFrameGroupBy(df, _group_vars=["a"])
# gf2 = DataFrameGroupBy(df, _group_vars=["a"], _group_data=gf._group_data)
# assert gf._group_data.equals(gf2._group_data)
def test_apply_returns_none():
df = DataFrame({"a": []})
gf = DataFrameGroupBy(df, _group_vars=["a"])
out = gf._datar_apply(lambda df: None)
assert out.shape == (0, 1)
def test_repr():
df = DataFrame({"a": [1, 2, 3]})
gf = DataFrameGroupBy(df, _group_vars=["a"])
out = repr(gf)
assert "[Groups: a (n=3)]" in out
def test_copy():
df = DataFrame({"a": [1, 2, 3]})
gf = DataFrameGroupBy(df, _group_vars=["a"])
gf2 = gf.copy(copy_grouped=True)
assert isinstance(gf2, DataFrameGroupBy)
assert gf.attrs["_group_vars"] == gf2.attrs["_group_vars"]
assert gf._group_data.equals(gf2._group_data)
gf3 = gf.copy(deep=False, copy_grouped=True)
assert gf3.attrs["_group_vars"] is gf.attrs["_group_vars"]
assert gf3.attrs["_group_drop"] is gf.attrs["_group_drop"]
assert gf3._group_data is gf._group_data
def test_gf_repr_html():
df = DataFrame({"a": [1, 2, 3]})
gf = DataFrameGroupBy(df, _group_vars=["a"])
assert "[Groups: ['a'] (n=3)]" in gf._repr_html_()
# rowwise ---------------------------------------
def test_rowwise():
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
rf = DataFrameRowwise(df)
assert rf._group_data.columns.tolist() == ["_rows"]
assert rf._group_data.shape == (3, 1)
    out = rf._datar_apply(lambda df: Series({"c": df.a + df.b}))
# -*- coding:utf-8 -*-
# =========================================================================== #
# Project : Data Mining #
# File : \mymain.py #
# Python : 3.9.1 #
# --------------------------------------------------------------------------- #
# Author : <NAME> #
# Company : nov8.ai #
# Email : <EMAIL> #
# URL : https://github.com/john-james-sf/Data-Mining/ #
# --------------------------------------------------------------------------- #
# Created : Tuesday, March 9th 2021, 12:24:24 am #
# Last Modified : Tuesday, March 9th 2021, 12:24:24 am #
# Modified By : <NAME> (<EMAIL>) #
# --------------------------------------------------------------------------- #
# License : BSD #
# Copyright (c) 2021 nov8.ai #
# =========================================================================== #
# =========================================================================== #
# 1. LIBRARIES #
# =========================================================================== #
#%%
# System and python libraries
from abc import ABC, abstractmethod
import datetime
import glob
import itertools
from joblib import dump, load
import os
import pickle
import time
import uuid
# Manipulating, analyzing and processing data
from collections import OrderedDict
import numpy as np
import pandas as pd
import scipy as sp
from scipy.stats.stats import pearsonr, f_oneway
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.compose import ColumnTransformer
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer, SimpleImputer
from sklearn.neighbors import LocalOutlierFactor
from sklearn.preprocessing import FunctionTransformer, StandardScaler
from sklearn.preprocessing import OneHotEncoder, PowerTransformer
from category_encoders import TargetEncoder, LeaveOneOutEncoder
# Feature and model selection and evaluation
from sklearn.feature_selection import RFECV, SelectKBest
from sklearn.feature_selection import VarianceThreshold, f_regression
from sklearn.metrics import make_scorer, mean_squared_error
from sklearn.model_selection import KFold
from sklearn.pipeline import make_pipeline, Pipeline, FeatureUnion
from sklearn.model_selection import GridSearchCV
# Regression based estimators
from sklearn.linear_model import LinearRegression, Lasso, Ridge, ElasticNet
# Tree-based estimators
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import AdaBoostRegressor, BaggingRegressor, ExtraTreesRegressor
from sklearn.ensemble import GradientBoostingRegressor, RandomForestRegressor
from sklearn.ensemble import HistGradientBoostingRegressor
from sklearn.tree import DecisionTreeRegressor
# Visualizing data
import seaborn as sns
import matplotlib.pyplot as plt
from tabulate import tabulate
# Utilities
from utils import notify, PersistEstimator, comment, print_dict, print_dict_keys
# Data Source
from data import AmesData
pd.set_option('display.max_rows', None)
"""
data hash pandas / numpy objects
"""
import itertools
from typing import Optional
import numpy as np
from pandas._libs import Timestamp
import pandas._libs.hashing as hashing
from pandas.core.dtypes.cast import infer_dtype_from_scalar
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_extension_array_dtype,
is_list_like,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCIndexClass,
ABCMultiIndex,
ABCSeries,
)
from pandas.core.dtypes.missing import isna
# 16 byte long hashing key
_default_hash_key = "0123456789123456"
def _combine_hash_arrays(arrays, num_items: int):
"""
Parameters
----------
arrays : generator
num_items : int
Should be the same as CPython's tupleobject.c
"""
try:
first = next(arrays)
except StopIteration:
return np.array([], dtype=np.uint64)
arrays = itertools.chain([first], arrays)
mult = np.uint64(1000003)
out = np.zeros_like(first) + np.uint64(0x345678)
for i, a in enumerate(arrays):
inverse_i = num_items - i
out ^= a
out *= mult
mult += np.uint64(82520 + inverse_i + inverse_i)
assert i + 1 == num_items, "Fed in wrong num_items"
out += np.uint64(97531)
return out
def hash_pandas_object(
obj,
index: bool = True,
encoding: str = "utf8",
hash_key: Optional[str] = _default_hash_key,
categorize: bool = True,
):
"""
Return a data hash of the Index/Series/DataFrame.
Parameters
----------
index : bool, default True
Include the index in the hash (if Series/DataFrame).
encoding : str, default 'utf8'
Encoding for data & key when strings.
hash_key : str, default _default_hash_key
Hash_key for string key to encode.
categorize : bool, default True
Whether to first categorize object arrays before hashing. This is more
efficient when the array contains duplicate values.
Returns
-------
Series of uint64, same length as the object
"""
from pandas import Series
if hash_key is None:
hash_key = _default_hash_key
if isinstance(obj, ABCMultiIndex):
return Series(hash_tuples(obj, encoding, hash_key), dtype="uint64", copy=False)
elif isinstance(obj, ABCIndexClass):
h = hash_array(obj.values, encoding, hash_key, categorize).astype(
"uint64", copy=False
)
h = Series(h, index=obj, dtype="uint64", copy=False)
elif isinstance(obj, ABCSeries):
h = hash_array(obj.values, encoding, hash_key, categorize).astype(
"uint64", copy=False
)
if index:
index_iter = (
hash_pandas_object(
obj.index,
index=False,
encoding=encoding,
hash_key=hash_key,
categorize=categorize,
).values
for _ in [None]
)
arrays = itertools.chain([h], index_iter)
h = _combine_hash_arrays(arrays, 2)
h = Series(h, index=obj.index, dtype="uint64", copy=False)
elif isinstance(obj, ABCDataFrame):
hashes = (hash_array(series.values) for _, series in obj.items())
num_items = len(obj.columns)
if index:
index_hash_generator = (
hash_pandas_object(
obj.index,
index=False,
encoding=encoding,
hash_key=hash_key,
categorize=categorize,
).values # noqa
for _ in [None]
)
num_items += 1
# keep `hashes` specifically a generator to keep mypy happy
_hashes = itertools.chain(hashes, index_hash_generator)
hashes = (x for x in _hashes)
h = _combine_hash_arrays(hashes, num_items)
h = Series(h, index=obj.index, dtype="uint64", copy=False)
else:
raise TypeError(f"Unexpected type for hashing {type(obj)}")
return h
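# Usage sketch for hash_pandas_object() (not part of the original module): the
# result is a uint64 Series aligned with the input's index, e.g.
#   hash_pandas_object(pandas.Series([1, 2, 3])).dtype  # dtype('uint64')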
def hash_tuples(vals, encoding="utf8", hash_key: str = _default_hash_key):
"""
Hash an MultiIndex / list-of-tuples efficiently
Parameters
----------
vals : MultiIndex, list-of-tuples, or single tuple
encoding : str, default 'utf8'
hash_key : str, default _default_hash_key
Returns
-------
ndarray of hashed values array
"""
is_tuple = False
if isinstance(vals, tuple):
vals = [vals]
is_tuple = True
elif not is_list_like(vals):
raise TypeError("must be convertible to a list-of-tuples")
from pandas import Categorical, MultiIndex
if not isinstance(vals, ABCMultiIndex):
vals = MultiIndex.from_tuples(vals)
# create a list-of-Categoricals
vals = [
Categorical(vals.codes[level], vals.levels[level], ordered=False, fastpath=True)
for level in range(vals.nlevels)
]
# hash the list-of-ndarrays
hashes = (
_hash_categorical(cat, encoding=encoding, hash_key=hash_key) for cat in vals
)
h = _combine_hash_arrays(hashes, len(vals))
if is_tuple:
h = h[0]
return h
def hash_tuple(val, encoding: str = "utf8", hash_key: str = _default_hash_key):
"""
Hash a single tuple efficiently
Parameters
----------
val : single tuple
encoding : str, default 'utf8'
hash_key : str, default _default_hash_key
Returns
-------
hash
"""
hashes = (_hash_scalar(v, encoding=encoding, hash_key=hash_key) for v in val)
h = _combine_hash_arrays(hashes, len(val))[0]
return h
def _hash_categorical(c, encoding: str, hash_key: str):
"""
Hash a Categorical by hashing its categories, and then mapping the codes
to the hashes
Parameters
----------
c : Categorical
encoding : str
hash_key : str
Returns
-------
ndarray of hashed values array, same size as len(c)
"""
# Convert ExtensionArrays to ndarrays
values = np.asarray(c.categories.values)
hashed = hash_array(values, encoding, hash_key, categorize=False)
# we have uint64, as we don't directly support missing values
# we don't want to use take_nd which will coerce to float
# instead, directly construct the result with a
# max(np.uint64) as the missing value indicator
#
# TODO: GH 15362
mask = c.isna()
if len(hashed):
result = hashed.take(c.codes)
else:
result = np.zeros(len(mask), dtype="uint64")
if mask.any():
result[mask] = np.iinfo(np.uint64).max
return result
def hash_array(
vals,
encoding: str = "utf8",
hash_key: str = _default_hash_key,
categorize: bool = True,
):
"""
Given a 1d array, return an array of deterministic integers.
Parameters
----------
vals : ndarray, Categorical
encoding : str, default 'utf8'
Encoding for data & key when strings.
hash_key : str, default _default_hash_key
Hash_key for string key to encode.
categorize : bool, default True
Whether to first categorize object arrays before hashing. This is more
efficient when the array contains duplicate values.
Returns
-------
1d uint64 numpy array of hash values, same length as the vals
"""
if not hasattr(vals, "dtype"):
raise TypeError("must pass a ndarray-like")
dtype = vals.dtype
# For categoricals, we hash the categories, then remap the codes to the
# hash values. (This check is above the complex check so that we don't ask
# numpy if categorical is a subdtype of complex, as it will choke).
if is_categorical_dtype(dtype):
return _hash_categorical(vals, encoding, hash_key)
elif is_extension_array_dtype(dtype):
vals, _ = vals._values_for_factorize()
dtype = vals.dtype
# we'll be working with everything as 64-bit values, so handle this
# 128-bit value early
if np.issubdtype(dtype, np.complex128):
return hash_array(np.real(vals)) + 23 * hash_array(np.imag(vals))
# First, turn whatever array this is into unsigned 64-bit ints, if we can
# manage it.
    elif dtype == bool:  # np.bool is deprecated/removed; compare the dtype directly
vals = vals.astype("u8")
elif issubclass(dtype.type, (np.datetime64, np.timedelta64)):
vals = vals.view("i8").astype("u8", copy=False)
elif issubclass(dtype.type, np.number) and dtype.itemsize <= 8:
vals = vals.view(f"u{vals.dtype.itemsize}").astype("u8")
else:
# With repeated values, its MUCH faster to categorize object dtypes,
# then hash and rename categories. We allow skipping the categorization
# when the values are known/likely to be unique.
if categorize:
from pandas import factorize, Categorical, Index
codes, categories = factorize(vals, sort=False)
cat = Categorical(codes, Index(categories), ordered=False, fastpath=True)
return _hash_categorical(cat, encoding, hash_key)
try:
vals = hashing.hash_object_array(vals, hash_key, encoding)
except TypeError:
# we have mixed types
vals = hashing.hash_object_array(
vals.astype(str).astype(object), hash_key, encoding
)
# Then, redistribute these 64-bit ints within the space of 64-bit ints
vals ^= vals >> 30
vals *= np.uint64(0xBF58476D1CE4E5B9)
vals ^= vals >> 27
vals *= np.uint64(0x94D049BB133111EB)
vals ^= vals >> 31
return vals
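# Sketch for hash_array() (illustrative): equal values always map to equal
# uint64 codes, e.g.
#   h = hash_array(np.array(["x", "y", "x"], dtype=object))
#   bool(h[0] == h[2])  # True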
def _hash_scalar(
val, encoding: str = "utf8", hash_key: str = _default_hash_key
) -> np.ndarray:
"""
Hash scalar value.
Parameters
----------
val : scalar
encoding : str, default "utf8"
hash_key : str, default _default_hash_key
Returns
-------
1d uint64 numpy array of hash value, of length 1
"""
if isna(val):
# this is to be consistent with the _hash_categorical implementation
return np.array([np.iinfo(np.uint64).max], dtype="u8")
if getattr(val, "tzinfo", None) is not None:
# for tz-aware datetimes, we need the underlying naive UTC value and
# not the tz aware object or pd extension type (as
# infer_dtype_from_scalar would do)
if not isinstance(val, Timestamp):
val = Timestamp(val)
val = val.tz_convert(None)
    dtype, val = infer_dtype_from_scalar(val)
## Machine learning testing script
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import math
import posanal as usrpos
import random
plot_ML = True
data_init = pd.read_csv('results_exp1-5.csv')
# ---- Data Pre-processing ---- #
dataset = data_init
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:,6].values
#print dataset
#print '\nX = '
#print X
#print '\nY = '
#print y
# ---- Split into training and testing ---- #
results_RF = pd.DataFrame()
results_SLR = pd.DataFrame()
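
# A minimal sketch of the split step announced above (the 70/30 ratio and the
# fixed random_state are assumptions, not taken from the original script):
from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)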
# --------------
#Importing header files
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
#Code starts here
#Code ends here
data = pd.read_csv(path)
# This file is part of Patsy
# Copyright (C) 2011-2013 <NAME> <<EMAIL>>
# See file LICENSE.txt for license information.
__all__ = ["C", "guess_categorical", "CategoricalSniffer",
"categorical_to_int"]
# How we handle categorical data: the big picture
# -----------------------------------------------
#
# There is no Python/NumPy standard for how to represent categorical data.
# There is no Python/NumPy standard for how to represent missing data.
#
# Together, these facts mean that when we receive some data object, we must be
# able to heuristically infer what levels it has -- and this process must be
# sensitive to the current missing data handling, because maybe 'None' is a
# level and maybe it is missing data.
#
# We don't know how missing data is represented until we get into the actual
# builder code, so anything which runs before this -- e.g., the 'C()' builtin
# -- cannot actually do *anything* meaningful with the data.
#
# Therefore, C() simply takes some data and arguments, and boxes them all up
# together into an object called (appropriately enough) _CategoricalBox. All
# the actual work of handling the various different sorts of categorical data
# (lists, string arrays, bool arrays, pandas.Categorical, etc.) happens inside
# the builder code, and we just extend this so that it also accepts
# _CategoricalBox objects as yet another categorical type.
#
# Originally this file contained a container type (called 'Categorical'), and
# the various sniffing, conversion, etc., functions were written as methods on
# that type. But we had to get rid of that type, so now this file just
# provides a set of plain old functions which are used by patsy.build to
# handle the different stages of categorical data munging.
import numpy as np
import six
from patsy import PatsyError
from patsy.util import (SortAnythingKey,
safe_scalar_isnan,
iterable,
have_pandas, have_pandas_categorical,
have_pandas_categorical_dtype,
safe_is_pandas_categorical,
pandas_Categorical_from_codes,
pandas_Categorical_categories,
pandas_Categorical_codes,
safe_issubdtype,
no_pickling, assert_no_pickling)
if have_pandas:
import pandas
# Objects of this type will always be treated as categorical, with the
# specified levels and contrast (if given).
class _CategoricalBox(object):
def __init__(self, data, contrast, levels):
self.data = data
self.contrast = contrast
self.levels = levels
__getstate__ = no_pickling
def C(data, contrast=None, levels=None):
"""
Marks some `data` as being categorical, and specifies how to interpret
it.
This is used for three reasons:
* To explicitly mark some data as categorical. For instance, integer data
is by default treated as numerical. If you have data that is stored
using an integer type, but where you want patsy to treat each different
value as a different level of a categorical factor, you can wrap it in a
call to `C` to accomplish this. E.g., compare::
dmatrix("a", {"a": [1, 2, 3]})
dmatrix("C(a)", {"a": [1, 2, 3]})
* To explicitly set the levels or override the default level ordering for
categorical data, e.g.::
dmatrix("C(a, levels=["a2", "a1"])", balanced(a=2))
* To override the default coding scheme for categorical data. The
`contrast` argument can be any of:
* A :class:`ContrastMatrix` object
* A simple 2d ndarray (which is treated the same as a ContrastMatrix
object except that you can't specify column names)
* An object with methods called `code_with_intercept` and
`code_without_intercept`, like the built-in contrasts
(:class:`Treatment`, :class:`Diff`, :class:`Poly`, etc.). See
:ref:`categorical-coding` for more details.
* A callable that returns one of the above.
"""
if isinstance(data, _CategoricalBox):
if contrast is None:
contrast = data.contrast
if levels is None:
levels = data.levels
data = data.data
return _CategoricalBox(data, contrast, levels)
def test_C():
c1 = C("asdf")
assert isinstance(c1, _CategoricalBox)
assert c1.data == "asdf"
assert c1.levels is None
assert c1.contrast is None
c2 = C("DATA", "CONTRAST", "LEVELS")
assert c2.data == "DATA"
assert c2.contrast == "CONTRAST"
assert c2.levels == "LEVELS"
c3 = C(c2, levels="NEW LEVELS")
assert c3.data == "DATA"
assert c3.contrast == "CONTRAST"
assert c3.levels == "NEW LEVELS"
c4 = C(c2, "NEW CONTRAST")
assert c4.data == "DATA"
assert c4.contrast == "NEW CONTRAST"
assert c4.levels == "LEVELS"
assert_no_pickling(c4)
def guess_categorical(data):
if safe_is_pandas_categorical(data):
return True
if isinstance(data, _CategoricalBox):
return True
data = np.asarray(data)
if safe_issubdtype(data.dtype, np.number):
return False
return True
def test_guess_categorical():
if have_pandas_categorical:
c = pandas.Categorical.from_array([1, 2, 3])
assert guess_categorical(c)
if have_pandas_categorical_dtype:
assert guess_categorical(pandas.Series(c))
assert guess_categorical(C([1, 2, 3]))
assert guess_categorical([True, False])
assert guess_categorical(["a", "b"])
assert guess_categorical(["a", "b", np.nan])
assert guess_categorical(["a", "b", None])
assert not guess_categorical([1, 2, 3])
assert not guess_categorical([1, 2, 3, np.nan])
assert not guess_categorical([1.0, 2.0, 3.0])
assert not guess_categorical([1.0, 2.0, 3.0, np.nan])
def _categorical_shape_fix(data):
# helper function
# data should not be a _CategoricalBox or pandas Categorical or anything
# -- it should be an actual iterable of data, but which might have the
# wrong shape.
if hasattr(data, "ndim") and data.ndim > 1:
raise PatsyError("categorical data cannot be >1-dimensional")
# coerce scalars into 1d, which is consistent with what we do for numeric
# factors. (See statsmodels/statsmodels#1881)
if (not iterable(data)
or isinstance(data, (six.text_type, six.binary_type))):
data = [data]
return data
class CategoricalSniffer(object):
def __init__(self, NA_action, origin=None):
self._NA_action = NA_action
self._origin = origin
self._contrast = None
self._levels = None
self._level_set = set()
def levels_contrast(self):
if self._levels is None:
levels = list(self._level_set)
levels.sort(key=SortAnythingKey)
self._levels = levels
return tuple(self._levels), self._contrast
def sniff(self, data):
if hasattr(data, "contrast"):
self._contrast = data.contrast
# returns a bool: are we confident that we found all the levels?
if isinstance(data, _CategoricalBox):
if data.levels is not None:
self._levels = tuple(data.levels)
return True
else:
# unbox and fall through
data = data.data
if safe_is_pandas_categorical(data):
# pandas.Categorical has its own NA detection, so don't try to
# second-guess it.
self._levels = tuple(pandas_Categorical_categories(data))
return True
# fastpath to avoid doing an item-by-item iteration over boolean
# arrays, as requested by #44
if hasattr(data, "dtype") and safe_issubdtype(data.dtype, np.bool_):
self._level_set = set([True, False])
return True
data = _categorical_shape_fix(data)
for value in data:
if self._NA_action.is_categorical_NA(value):
continue
if value is True or value is False:
self._level_set.update([True, False])
else:
try:
self._level_set.add(value)
except TypeError:
raise PatsyError("Error interpreting categorical data: "
"all items must be hashable",
self._origin)
# If everything we've seen is boolean, assume that everything else
# would be too. Otherwise we need to keep looking.
return self._level_set == set([True, False])
__getstate__ = no_pickling
def test_CategoricalSniffer():
from patsy.missing import NAAction
def t(NA_types, datas, exp_finish_fast, exp_levels, exp_contrast=None):
sniffer = CategoricalSniffer(NAAction(NA_types=NA_types))
for data in datas:
done = sniffer.sniff(data)
if done:
assert exp_finish_fast
break
else:
assert not exp_finish_fast
assert sniffer.levels_contrast() == (exp_levels, exp_contrast)
if have_pandas_categorical:
# We make sure to test with both boxed and unboxed pandas objects,
# because we used to have a bug where boxed pandas objects would be
# treated as categorical, but their levels would be lost...
preps = [lambda x: x,
C]
if have_pandas_categorical_dtype:
preps += [pandas.Series,
lambda x: C(pandas.Series(x))]
for prep in preps:
t([], [prep(pandas.Categorical.from_array([1, 2, None]))],
True, (1, 2))
# check order preservation
t([], [prep(pandas_Categorical_from_codes([1, 0], ["a", "b"]))],
True, ("a", "b"))
t([], [prep(pandas_Categorical_from_codes([1, 0], ["b", "a"]))],
True, ("b", "a"))
# check that if someone sticks a .contrast field onto our object
obj = prep(pandas.Categorical.from_array(["a", "b"]))
obj.contrast = "CONTRAST"
t([], [obj], True, ("a", "b"), "CONTRAST")
t([], [C([1, 2]), C([3, 2])], False, (1, 2, 3))
# check order preservation
t([], [C([1, 2], levels=[1, 2, 3]), C([4, 2])], True, (1, 2, 3))
t([], [C([1, 2], levels=[3, 2, 1]), C([4, 2])], True, (3, 2, 1))
# do some actual sniffing with NAs in
t(["None", "NaN"], [C([1, np.nan]), C([10, None])],
False, (1, 10))
# But 'None' can be a type if we don't make it represent NA:
sniffer = CategoricalSniffer(NAAction(NA_types=["NaN"]))
sniffer.sniff(C([1, np.nan, None]))
# The level order here is different on py2 and py3 :-( Because there's no
# consistent way to sort mixed-type values on both py2 and py3. Honestly
# people probably shouldn't use this, but I don't know how to give a
# sensible error.
levels, _ = sniffer.levels_contrast()
assert set(levels) == set([None, 1])
# bool special cases
t(["None", "NaN"], [C([True, np.nan, None])],
True, (False, True))
t([], [C([10, 20]), C([False]), C([30, 40])],
False, (False, True, 10, 20, 30, 40))
# exercise the fast-path
t([], [np.asarray([True, False]), ["foo"]],
True, (False, True))
# check tuples too
t(["None", "NaN"], [C([("b", 2), None, ("a", 1), np.nan, ("c", None)])],
False, (("a", 1), ("b", 2), ("c", None)))
# contrasts
t([], [C([10, 20], contrast="FOO")], False, (10, 20), "FOO")
# no box
t([], [[10, 30], [20]], False, (10, 20, 30))
t([], [["b", "a"], ["a"]], False, ("a", "b"))
# 0d
t([], ["b"], False, ("b",))
from nose.tools import assert_raises
# unhashable level error:
sniffer = CategoricalSniffer(NAAction())
assert_raises(PatsyError, sniffer.sniff, [{}])
# >1d is illegal
assert_raises(PatsyError, sniffer.sniff, np.asarray([["b"]]))
# returns either a 1d ndarray or a pandas.Series
def categorical_to_int(data, levels, NA_action, origin=None):
assert isinstance(levels, tuple)
# In this function, missing values are always mapped to -1
if safe_is_pandas_categorical(data):
data_levels_tuple = tuple(pandas_Categorical_categories(data))
if not data_levels_tuple == levels:
raise PatsyError("mismatching levels: expected %r, got %r"
% (levels, data_levels_tuple), origin)
# pandas.Categorical also uses -1 to indicate NA, and we don't try to
# second-guess its NA detection, so we can just pass it back.
return pandas_Categorical_codes(data)
if isinstance(data, _CategoricalBox):
if data.levels is not None and tuple(data.levels) != levels:
raise PatsyError("mismatching levels: expected %r, got %r"
% (levels, tuple(data.levels)), origin)
data = data.data
data = _categorical_shape_fix(data)
try:
level_to_int = dict(zip(levels, range(len(levels))))
except TypeError:
raise PatsyError("Error interpreting categorical data: "
"all items must be hashable", origin)
# fastpath to avoid doing an item-by-item iteration over boolean arrays,
# as requested by #44
if hasattr(data, "dtype") and safe_issubdtype(data.dtype, np.bool_):
if level_to_int[False] == 0 and level_to_int[True] == 1:
return data.astype(np.int_)
out = np.empty(len(data), dtype=int)
for i, value in enumerate(data):
if NA_action.is_categorical_NA(value):
out[i] = -1
else:
try:
out[i] = level_to_int[value]
except KeyError:
SHOW_LEVELS = 4
level_strs = []
if len(levels) <= SHOW_LEVELS:
level_strs += [repr(level) for level in levels]
else:
level_strs += [repr(level)
for level in levels[:SHOW_LEVELS//2]]
level_strs.append("...")
level_strs += [repr(level)
for level in levels[-SHOW_LEVELS//2:]]
level_str = "[%s]" % (", ".join(level_strs))
raise PatsyError("Error converting data to categorical: "
"observation with value %r does not match "
"any of the expected levels (expected: %s)"
% (value, level_str), origin)
except TypeError:
raise PatsyError("Error converting data to categorical: "
"encountered unhashable value %r"
% (value,), origin)
if have_pandas and isinstance(data, pandas.Series):
out = pandas.Series(out, index=data.index)
return out
def test_categorical_to_int():
from nose.tools import assert_raises
from patsy.missing import NAAction
if have_pandas:
s = pandas.Series(["a", "b", "c"], index=[10, 20, 30])
c_pandas = categorical_to_int(s, ("a", "b", "c"), NAAction())
assert np.all(c_pandas == [0, 1, 2])
assert np.all(c_pandas.index == [10, 20, 30])
# Input must be 1-dimensional
assert_raises(PatsyError,
categorical_to_int,
pandas.DataFrame({10: s}), ("a", "b", "c"), NAAction())
if have_pandas_categorical:
constructors = [pandas_Categorical_from_codes]
if have_pandas_categorical_dtype:
def Series_from_codes(codes, categories):
c = pandas_Categorical_from_codes(codes, categories)
return | pandas.Series(c) | pandas.Series |
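# --- Illustrative sketch (editor addition, not part of the patsy source) ---
# End-to-end flavour of the boxing machinery above, assuming patsy is installed:
# C() merely wraps the data; levels and coding are resolved when the design
# matrix is built.
from patsy import dmatrix
print(dmatrix("C(a)", {"a": [1, 2, 1, 2]}))                 # force integers to be categorical
print(dmatrix("C(a, levels=[3, 2, 1])", {"a": [1, 2, 3]}))  # explicit level ordering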
from datetime import datetime
from dateutil.tz import tzlocal
import pytest
from pandas.compat import IS64
from pandas import (
DateOffset,
DatetimeIndex,
Index,
Series,
bdate_range,
date_range,
)
import pandas._testing as tm
from pandas.tseries.offsets import (
BDay,
Day,
Hour,
)
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
class TestDatetimeIndexOps:
def test_ops_properties_basic(self, datetime_series):
# sanity check that the behavior didn't change
# GH#7206
for op in ["year", "day", "second", "weekday"]:
msg = f"'Series' object has no attribute '{op}'"
with pytest.raises(AttributeError, match=msg):
getattr(datetime_series, op)
# attribute access should still work!
s = Series({"year": 2000, "month": 1, "day": 10})
assert s.year == 2000
assert s.month == 1
assert s.day == 10
msg = "'Series' object has no attribute 'weekday'"
with pytest.raises(AttributeError, match=msg):
s.weekday
@pytest.mark.parametrize(
"freq,expected",
[
("A", "day"),
("Q", "day"),
("M", "day"),
("D", "day"),
("H", "hour"),
("T", "minute"),
("S", "second"),
("L", "millisecond"),
("U", "microsecond"),
],
)
def test_resolution(self, request, tz_naive_fixture, freq, expected):
tz = tz_naive_fixture
if freq == "A" and not IS64 and isinstance(tz, tzlocal):
request.node.add_marker(
pytest.mark.xfail(reason="OverflowError inside tzlocal past 2038")
)
idx = date_range(start="2013-04-01", periods=30, freq=freq, tz=tz)
assert idx.resolution == expected
def test_infer_freq(self, freq_sample):
# GH 11018
idx = | date_range("2011-01-01 09:00:00", freq=freq_sample, periods=10) | pandas.date_range |
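# --- Illustrative sketch (editor addition): the behaviour test_infer_freq checks,
# shown without the pytest fixture machinery; "H" stands in for freq_sample.
import pandas as pd
idx = pd.date_range("2011-01-01 09:00:00", freq="H", periods=10)
assert pd.infer_freq(idx) == "H"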
import sys
import subprocess
import pandas as pd
import os
from pathlib import Path
import time
import datetime
import ast
from configparser import ConfigParser
'''Reading the Config file from command line argument'''
parser = ConfigParser()
pd.set_option('display.max_columns', None)
config_file = sys.argv[1]
parser.read(config_file)
'''Printing the variables '''
data_path = parser.get('paths', 'data_path')
action_path = data_path + "/" + "action"
print("action_path is " + str(action_path))
apps_home_path = parser.get('paths', 'apps_home')
print("apps_home_path is " + str(apps_home_path))
'''Creating DF for apps to be tracked from config file'''
all_apps = parser.get('yarn_apps', 'appname')
list_apps = ast.literal_eval(all_apps)
# print("List of Apps are : " + str(list_apps))
df_apps = | pd.DataFrame(list_apps, columns=['app_name', 'app_schedule', 'app_user', 'app_submit_file', 'app_type']) | pandas.DataFrame |
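# --- Illustrative sketch (editor addition): the rough config layout the parser
# calls above assume; section and key names come from the code, every value is
# an invented placeholder.
example_cfg = """
[paths]
data_path = /data/yarn
apps_home = /opt/apps
[yarn_apps]
appname = [('app1', '0 2 * * *', 'etl_user', 'submit_app1.sh', 'spark')]
"""
# parser.read_string(example_cfg)  # would satisfy the get() calls above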
"""
Experimental manager based on storing a collection of 1D arrays
"""
from __future__ import annotations
from typing import (
TYPE_CHECKING,
Any,
Callable,
TypeVar,
)
import numpy as np
from pandas._libs import (
NaT,
lib,
)
from pandas._typing import (
ArrayLike,
Hashable,
)
from pandas.util._validators import validate_bool_kwarg
from pandas.core.dtypes.cast import (
astype_array_safe,
infer_dtype_from_scalar,
soft_convert_objects,
)
from pandas.core.dtypes.common import (
ensure_int64,
is_datetime64_ns_dtype,
is_dtype_equal,
is_extension_array_dtype,
is_numeric_dtype,
is_object_dtype,
is_timedelta64_ns_dtype,
)
from pandas.core.dtypes.dtypes import (
ExtensionDtype,
PandasDtype,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCPandasArray,
ABCSeries,
)
from pandas.core.dtypes.inference import is_inferred_bool_dtype
from pandas.core.dtypes.missing import (
array_equals,
isna,
)
import pandas.core.algorithms as algos
from pandas.core.array_algos.quantile import quantile_compat
from pandas.core.array_algos.take import take_1d
from pandas.core.arrays import (
DatetimeArray,
ExtensionArray,
PandasArray,
TimedeltaArray,
)
from pandas.core.arrays.sparse import SparseDtype
from pandas.core.construction import (
ensure_wrapped_if_datetimelike,
extract_array,
sanitize_array,
)
from pandas.core.indexers import (
maybe_convert_indices,
validate_indices,
)
from pandas.core.indexes.api import (
Index,
ensure_index,
)
from pandas.core.internals.base import (
DataManager,
SingleDataManager,
interleaved_dtype,
)
from pandas.core.internals.blocks import (
ensure_block_shape,
external_values,
new_block,
to_native_types,
)
if TYPE_CHECKING:
from pandas import Float64Index
T = TypeVar("T", bound="ArrayManager")
class ArrayManager(DataManager):
"""
Core internal data structure to implement DataFrame and Series.
Alternative to the BlockManager, storing a list of 1D arrays instead of
Blocks.
This is *not* a public API class
Parameters
----------
arrays : Sequence of arrays
axes : Sequence of Index
verify_integrity : bool, default True
"""
__slots__ = [
"_axes", # private attribute, because 'axes' has different order, see below
"arrays",
]
arrays: list[np.ndarray | ExtensionArray]
_axes: list[Index]
def __init__(
self,
arrays: list[np.ndarray | ExtensionArray],
axes: list[Index],
verify_integrity: bool = True,
):
# Note: we are storing the axes in "_axes" in the (row, columns) order
# which contrasts the order how it is stored in BlockManager
self._axes = axes
self.arrays = arrays
if verify_integrity:
self._axes = [ensure_index(ax) for ax in axes]
self.arrays = [ensure_wrapped_if_datetimelike(arr) for arr in arrays]
self._verify_integrity()
def make_empty(self: T, axes=None) -> T:
"""Return an empty ArrayManager with the items axis of len 0 (no columns)"""
if axes is None:
axes = [self.axes[1:], Index([])]
arrays: list[np.ndarray | ExtensionArray] = []
return type(self)(arrays, axes)
@property
def items(self) -> Index:
return self._axes[-1]
@property
# error: Signature of "axes" incompatible with supertype "DataManager"
def axes(self) -> list[Index]: # type: ignore[override]
# mypy doesn't work to override attribute with property
# see https://github.com/python/mypy/issues/4125
"""Axes is BlockManager-compatible order (columns, rows)"""
return [self._axes[1], self._axes[0]]
@property
def shape_proper(self) -> tuple[int, ...]:
# this returns (n_rows, n_columns)
return tuple(len(ax) for ax in self._axes)
@staticmethod
def _normalize_axis(axis: int) -> int:
# switch axis
axis = 1 if axis == 0 else 0
return axis
def set_axis(
self, axis: int, new_labels: Index, verify_integrity: bool = True
) -> None:
# Caller is responsible for ensuring we have an Index object.
axis = self._normalize_axis(axis)
if verify_integrity:
old_len = len(self._axes[axis])
new_len = len(new_labels)
if new_len != old_len:
raise ValueError(
f"Length mismatch: Expected axis has {old_len} elements, new "
f"values have {new_len} elements"
)
self._axes[axis] = new_labels
def consolidate(self) -> ArrayManager:
return self
def is_consolidated(self) -> bool:
return True
def _consolidate_inplace(self) -> None:
pass
def get_dtypes(self):
return np.array([arr.dtype for arr in self.arrays], dtype="object")
# TODO setstate getstate
def __repr__(self) -> str:
output = type(self).__name__
output += f"\nIndex: {self._axes[0]}"
if self.ndim == 2:
output += f"\nColumns: {self._axes[1]}"
output += f"\n{len(self.arrays)} arrays:"
for arr in self.arrays:
output += f"\n{arr.dtype}"
return output
def _verify_integrity(self) -> None:
n_rows, n_columns = self.shape_proper
if not len(self.arrays) == n_columns:
raise ValueError(
"Number of passed arrays must equal the size of the column Index: "
f"{len(self.arrays)} arrays vs {n_columns} columns."
)
for arr in self.arrays:
if not len(arr) == n_rows:
raise ValueError(
"Passed arrays should have the same length as the rows Index: "
f"{len(arr)} vs {n_rows} rows"
)
if not isinstance(arr, (np.ndarray, ExtensionArray)):
raise ValueError(
"Passed arrays should be np.ndarray or ExtensionArray instances, "
f"got {type(arr)} instead"
)
if not arr.ndim == 1:
raise ValueError(
"Passed arrays should be 1-dimensional, got array with "
f"{arr.ndim} dimensions instead."
)
def reduce(
self: T, func: Callable, ignore_failures: bool = False
) -> tuple[T, np.ndarray]:
"""
Apply reduction function column-wise, returning a single-row ArrayManager.
Parameters
----------
func : reduction function
ignore_failures : bool, default False
Whether to drop columns where func raises TypeError.
Returns
-------
ArrayManager
np.ndarray
Indexer of column indices that are retained.
"""
result_arrays: list[np.ndarray] = []
result_indices: list[int] = []
for i, arr in enumerate(self.arrays):
try:
res = func(arr, axis=0)
except TypeError:
if not ignore_failures:
raise
else:
# TODO NaT doesn't preserve dtype, so we need to ensure to create
# a timedelta result array if original was timedelta
# what if datetime results in timedelta? (eg std)
if res is NaT and is_timedelta64_ns_dtype(arr.dtype):
result_arrays.append(np.array(["NaT"], dtype="timedelta64[ns]"))
else:
# error: Argument 1 to "append" of "list" has incompatible type
# "ExtensionArray"; expected "ndarray"
result_arrays.append(
sanitize_array([res], None) # type: ignore[arg-type]
)
result_indices.append(i)
index = Index._simple_new(np.array([None], dtype=object)) # placeholder
if ignore_failures:
indexer = np.array(result_indices)
columns = self.items[result_indices]
else:
indexer = np.arange(self.shape[0])
columns = self.items
# error: Argument 1 to "ArrayManager" has incompatible type "List[ndarray]";
# expected "List[Union[ndarray, ExtensionArray]]"
new_mgr = type(self)(result_arrays, [index, columns]) # type: ignore[arg-type]
return new_mgr, indexer
def grouped_reduce(self: T, func: Callable, ignore_failures: bool = False) -> T:
"""
Apply grouped reduction function columnwise, returning a new ArrayManager.
Parameters
----------
func : grouped reduction function
ignore_failures : bool, default False
Whether to drop columns where func raises TypeError.
Returns
-------
ArrayManager
"""
result_arrays: list[np.ndarray] = []
result_indices: list[int] = []
for i, arr in enumerate(self.arrays):
try:
res = func(arr)
except (TypeError, NotImplementedError):
if not ignore_failures:
raise
continue
result_arrays.append(res)
result_indices.append(i)
if len(result_arrays) == 0:
index = Index([None]) # placeholder
else:
index = Index(range(result_arrays[0].shape[0]))
if ignore_failures:
columns = self.items[np.array(result_indices, dtype="int64")]
else:
columns = self.items
# error: Argument 1 to "ArrayManager" has incompatible type "List[ndarray]";
# expected "List[Union[ndarray, ExtensionArray]]"
return type(self)(result_arrays, [index, columns]) # type: ignore[arg-type]
def operate_blockwise(self, other: ArrayManager, array_op) -> ArrayManager:
"""
Apply array_op blockwise with another (aligned) BlockManager.
"""
# TODO what if `other` is BlockManager ?
left_arrays = self.arrays
right_arrays = other.arrays
result_arrays = [
array_op(left, right) for left, right in zip(left_arrays, right_arrays)
]
return type(self)(result_arrays, self._axes)
def apply(
self: T,
f,
align_keys: list[str] | None = None,
ignore_failures: bool = False,
**kwargs,
) -> T:
"""
Iterate over the arrays, collect and create a new ArrayManager.
Parameters
----------
f : str or callable
Name of the Array method to apply.
align_keys: List[str] or None, default None
ignore_failures: bool, default False
**kwargs
Keywords to pass to `f`
Returns
-------
ArrayManager
"""
assert "filter" not in kwargs
align_keys = align_keys or []
result_arrays: list[np.ndarray] = []
result_indices: list[int] = []
# fillna: Series/DataFrame is responsible for making sure value is aligned
aligned_args = {k: kwargs[k] for k in align_keys}
if f == "apply":
f = kwargs.pop("func")
for i, arr in enumerate(self.arrays):
if aligned_args:
for k, obj in aligned_args.items():
if isinstance(obj, (ABCSeries, ABCDataFrame)):
# The caller is responsible for ensuring that
# obj.axes[-1].equals(self.items)
if obj.ndim == 1:
kwargs[k] = obj.iloc[i]
else:
kwargs[k] = obj.iloc[:, i]._values
else:
# otherwise we have an array-like
kwargs[k] = obj[i]
try:
if callable(f):
applied = f(arr, **kwargs)
else:
applied = getattr(arr, f)(**kwargs)
except (TypeError, NotImplementedError):
if not ignore_failures:
raise
continue
# if not isinstance(applied, ExtensionArray):
# # TODO not all EA operations return new EAs (eg astype)
# applied = array(applied)
result_arrays.append(applied)
result_indices.append(i)
new_axes: list[Index]
if ignore_failures:
# TODO copy?
new_axes = [self._axes[0], self._axes[1][result_indices]]
else:
new_axes = self._axes
if len(result_arrays) == 0:
return self.make_empty(new_axes)
# error: Argument 1 to "ArrayManager" has incompatible type "List[ndarray]";
# expected "List[Union[ndarray, ExtensionArray]]"
return type(self)(result_arrays, new_axes) # type: ignore[arg-type]
def apply_2d(self: T, f, ignore_failures: bool = False, **kwargs) -> T:
"""
Variant of `apply`, but where the function should not be applied to
each column independently, but to the full data as a 2D array.
"""
values = self.as_array()
try:
result = f(values, **kwargs)
except (TypeError, NotImplementedError):
if not ignore_failures:
raise
result_arrays = []
new_axes = [self._axes[0], self.axes[1].take([])]
else:
result_arrays = [result[:, i] for i in range(len(self._axes[1]))]
new_axes = self._axes
return type(self)(result_arrays, new_axes)
def apply_with_block(self: T, f, align_keys=None, swap_axis=True, **kwargs) -> T:
# switch axis to follow BlockManager logic
if swap_axis and "axis" in kwargs and self.ndim == 2:
kwargs["axis"] = 1 if kwargs["axis"] == 0 else 0
align_keys = align_keys or []
aligned_args = {k: kwargs[k] for k in align_keys}
result_arrays = []
for i, arr in enumerate(self.arrays):
if aligned_args:
for k, obj in aligned_args.items():
if isinstance(obj, (ABCSeries, ABCDataFrame)):
# The caller is responsible for ensuring that
# obj.axes[-1].equals(self.items)
if obj.ndim == 1:
if self.ndim == 2:
kwargs[k] = obj.iloc[slice(i, i + 1)]._values
else:
kwargs[k] = obj.iloc[:]._values
else:
kwargs[k] = obj.iloc[:, [i]]._values
else:
# otherwise we have an ndarray
if obj.ndim == 2:
kwargs[k] = obj[[i]]
# error: Item "ExtensionArray" of "Union[Any, ExtensionArray]" has no
# attribute "tz"
if hasattr(arr, "tz") and arr.tz is None: # type: ignore[union-attr]
# DatetimeArray needs to be converted to ndarray for DatetimeLikeBlock
# error: Item "ExtensionArray" of "Union[Any, ExtensionArray]" has no
# attribute "_data"
arr = arr._data # type: ignore[union-attr]
elif arr.dtype.kind == "m" and not isinstance(arr, np.ndarray):
# TimedeltaArray needs to be converted to ndarray for TimedeltaBlock
# error: "ExtensionArray" has no attribute "_data"
arr = arr._data # type: ignore[attr-defined]
if self.ndim == 2:
arr = ensure_block_shape(arr, 2)
block = new_block(arr, placement=slice(0, 1, 1), ndim=2)
else:
block = new_block(arr, placement=slice(0, len(self), 1), ndim=1)
applied = getattr(block, f)(**kwargs)
if isinstance(applied, list):
applied = applied[0]
arr = applied.values
if self.ndim == 2 and arr.ndim == 2:
# 2D for np.ndarray or DatetimeArray/TimedeltaArray
assert len(arr) == 1
# error: Invalid index type "Tuple[int, slice]" for
# "Union[ndarray, ExtensionArray]"; expected type
# "Union[int, slice, ndarray]"
arr = arr[0, :] # type: ignore[index]
result_arrays.append(arr)
return type(self)(result_arrays, self._axes)
def quantile(
self,
*,
qs: Float64Index,
axis: int = 0,
transposed: bool = False,
interpolation="linear",
) -> ArrayManager:
arrs = [ensure_block_shape(x, 2) for x in self.arrays]
assert axis == 1
new_arrs = [
quantile_compat(x, np.asarray(qs._values), interpolation) for x in arrs
]
for i, arr in enumerate(new_arrs):
if arr.ndim == 2:
assert arr.shape[0] == 1, arr.shape
new_arrs[i] = arr[0]
axes = [qs, self._axes[1]]
return type(self)(new_arrs, axes)
def where(self, other, cond, align: bool, errors: str) -> ArrayManager:
if align:
align_keys = ["other", "cond"]
else:
align_keys = ["cond"]
other = extract_array(other, extract_numpy=True)
return self.apply_with_block(
"where",
align_keys=align_keys,
other=other,
cond=cond,
errors=errors,
)
# TODO what is this used for?
# def setitem(self, indexer, value) -> ArrayManager:
# return self.apply_with_block("setitem", indexer=indexer, value=value)
def putmask(self, mask, new, align: bool = True):
if align:
align_keys = ["new", "mask"]
else:
align_keys = ["mask"]
new = extract_array(new, extract_numpy=True)
return self.apply_with_block(
"putmask",
align_keys=align_keys,
mask=mask,
new=new,
)
def diff(self, n: int, axis: int) -> ArrayManager:
if axis == 1:
# DataFrame only calls this for n=0, in which case performing it
# with axis=0 is equivalent
assert n == 0
axis = 0
return self.apply(algos.diff, n=n, axis=axis, stacklevel=5)
def interpolate(self, **kwargs) -> ArrayManager:
return self.apply_with_block("interpolate", swap_axis=False, **kwargs)
def shift(self, periods: int, axis: int, fill_value) -> ArrayManager:
if fill_value is lib.no_default:
fill_value = None
if axis == 1 and self.ndim == 2:
# TODO column-wise shift
raise NotImplementedError
return self.apply_with_block(
"shift", periods=periods, axis=axis, fill_value=fill_value
)
def fillna(self, value, limit, inplace: bool, downcast) -> ArrayManager:
return self.apply_with_block(
"fillna", value=value, limit=limit, inplace=inplace, downcast=downcast
)
def downcast(self) -> ArrayManager:
return self.apply_with_block("downcast")
def astype(self, dtype, copy: bool = False, errors: str = "raise") -> ArrayManager:
return self.apply(astype_array_safe, dtype=dtype, copy=copy, errors=errors)
def convert(
self,
copy: bool = True,
datetime: bool = True,
numeric: bool = True,
timedelta: bool = True,
) -> ArrayManager:
def _convert(arr):
if is_object_dtype(arr.dtype):
return soft_convert_objects(
arr,
datetime=datetime,
numeric=numeric,
timedelta=timedelta,
copy=copy,
)
else:
return arr.copy() if copy else arr
return self.apply(_convert)
def replace(self, value, **kwargs) -> ArrayManager:
assert np.ndim(value) == 0, value
# TODO "replace" is right now implemented on the blocks, we should move
# it to general array algos so it can be reused here
return self.apply_with_block("replace", value=value, **kwargs)
def replace_list(
self: T,
src_list: list[Any],
dest_list: list[Any],
inplace: bool = False,
regex: bool = False,
) -> T:
""" do a list replace """
inplace = validate_bool_kwarg(inplace, "inplace")
return self.apply_with_block(
"_replace_list",
src_list=src_list,
dest_list=dest_list,
inplace=inplace,
regex=regex,
)
def to_native_types(self, **kwargs):
return self.apply(to_native_types, **kwargs)
@property
def is_mixed_type(self) -> bool:
return True
@property
def is_numeric_mixed_type(self) -> bool:
return all(is_numeric_dtype(t) for t in self.get_dtypes())
@property
def any_extension_types(self) -> bool:
"""Whether any of the blocks in this manager are extension blocks"""
return False # any(block.is_extension for block in self.blocks)
@property
def is_view(self) -> bool:
""" return a boolean if we are a single block and are a view """
# TODO what is this used for?
return False
@property
def is_single_block(self) -> bool:
return False
def _get_data_subset(self, predicate: Callable) -> ArrayManager:
indices = [i for i, arr in enumerate(self.arrays) if predicate(arr)]
arrays = [self.arrays[i] for i in indices]
# TODO copy?
new_axes = [self._axes[0], self._axes[1][np.array(indices, dtype="int64")]]
return type(self)(arrays, new_axes, verify_integrity=False)
def get_bool_data(self, copy: bool = False) -> ArrayManager:
"""
Select columns that are bool-dtype and object-dtype columns that are all-bool.
Parameters
----------
copy : bool, default False
Whether to copy the blocks
"""
return self._get_data_subset(is_inferred_bool_dtype)
def get_numeric_data(self, copy: bool = False) -> ArrayManager:
"""
Select columns that have a numeric dtype.
Parameters
----------
copy : bool, default False
Whether to copy the blocks
"""
return self._get_data_subset(
lambda arr: is_numeric_dtype(arr.dtype)
or getattr(arr.dtype, "_is_numeric", False)
)
def copy(self: T, deep=True) -> T:
"""
Make deep or shallow copy of ArrayManager
Parameters
----------
deep : bool or string, default True
If False, return shallow copy (do not copy data)
If 'all', copy data and a deep copy of the index
Returns
-------
BlockManager
"""
# this preserves the notion of view copying of axes
if deep:
# hit in e.g. tests.io.json.test_pandas
def copy_func(ax):
return ax.copy(deep=True) if deep == "all" else ax.view()
new_axes = [copy_func(ax) for ax in self._axes]
else:
new_axes = list(self._axes)
if deep:
new_arrays = [arr.copy() for arr in self.arrays]
else:
new_arrays = self.arrays
return type(self)(new_arrays, new_axes)
def as_array(
self,
transpose: bool = False,
dtype=None,
copy: bool = False,
na_value=lib.no_default,
) -> np.ndarray:
"""
Convert the blockmanager data into an numpy array.
Parameters
----------
transpose : bool, default False
If True, transpose the return array.
dtype : object, default None
Data type of the return array.
copy : bool, default False
If True then guarantee that a copy is returned. A value of
False does not guarantee that the underlying data is not
copied.
na_value : object, default lib.no_default
Value to be used as the missing value sentinel.
Returns
-------
arr : ndarray
"""
if len(self.arrays) == 0:
arr = np.empty(self.shape, dtype=float)
return arr.transpose() if transpose else arr
# We want to copy when na_value is provided to avoid
# mutating the original object
copy = copy or na_value is not lib.no_default
if not dtype:
dtype = interleaved_dtype([arr.dtype for arr in self.arrays])
if isinstance(dtype, SparseDtype):
dtype = dtype.subtype
elif isinstance(dtype, PandasDtype):
dtype = dtype.numpy_dtype
elif is_extension_array_dtype(dtype):
dtype = "object"
elif is_dtype_equal(dtype, str):
dtype = "object"
result = np.empty(self.shape_proper, dtype=dtype)
# error: Incompatible types in assignment (expression has type "Union[ndarray,
# ExtensionArray]", variable has type "ndarray")
for i, arr in enumerate(self.arrays): # type: ignore[assignment]
arr = arr.astype(dtype, copy=copy)
result[:, i] = arr
if na_value is not lib.no_default:
result[isna(result)] = na_value
return result
# return arr.transpose() if transpose else arr
def get_slice(self, slobj: slice, axis: int = 0) -> ArrayManager:
axis = self._normalize_axis(axis)
if axis == 0:
arrays = [arr[slobj] for arr in self.arrays]
elif axis == 1:
arrays = self.arrays[slobj]
new_axes = list(self._axes)
new_axes[axis] = new_axes[axis]._getitem_slice(slobj)
return type(self)(arrays, new_axes, verify_integrity=False)
def fast_xs(self, loc: int) -> ArrayLike:
"""
Return the array corresponding to `frame.iloc[loc]`.
Parameters
----------
loc : int
Returns
-------
np.ndarray or ExtensionArray
"""
dtype = interleaved_dtype([arr.dtype for arr in self.arrays])
values = [arr[loc] for arr in self.arrays]
if isinstance(dtype, ExtensionDtype):
result = dtype.construct_array_type()._from_sequence(values, dtype=dtype)
# for datetime64/timedelta64, the np.ndarray constructor cannot handle pd.NaT
elif is_datetime64_ns_dtype(dtype):
result = DatetimeArray._from_sequence(values, dtype=dtype)._data
elif is_timedelta64_ns_dtype(dtype):
result = | TimedeltaArray._from_sequence(values, dtype=dtype) | pandas.core.arrays.TimedeltaArray._from_sequence |
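# --- Illustrative sketch (editor addition, not part of the pandas source) ---
# In the pandas versions this module comes from (~1.3-1.5) the ArrayManager
# backend could be selected through an option; that option has since been
# removed, so treat this as version-dependent.
import pandas as pd
with pd.option_context("mode.data_manager", "array"):
    df = pd.DataFrame({"a": [1, 2], "b": [0.5, 1.5]})
    print(type(df._mgr).__name__)  # "ArrayManager" when the option is available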
# Compare the depth of the seabed along the cruise track between two files which each contain lines of the date,
# latitude, longitude and depth value. Depths have come from different sources of bathymetry data. Calculate the
# differences between the depths and output this into csv files.
import csv
import pandas
def file_to_position_depths(file_path):
"""Get the position and depths from one file. Read into memory as a dictionary with (lat, lon) as the key and depth
as the value."""
# create the dictionary
result = {}
# read the position and depth data from the input csv file
with open(file_path) as csvfile:
contents = csv.reader(csvfile)
next(contents)
count = 1
# print progress
for line in contents:
if count % 100_000 == 0:
print(count)
count += 1
# get the latitude, longitude and depth from the row in the file
_, lat, long, depth = line
lat_long = (float(lat), float(long))
# if the depth is empty, then continue
if depth == '':
continue
result[lat_long] = float(depth)
return result
def yield_difference(file_path, position_depths):
"""Using the dictionary of positions and depths, get another file containing the same positions, but a different
source of data. Calculate the difference in depth at the same positions."""
# open the file containing the second set of depth data
with open(file_path) as csvfile:
contents = csv.reader(csvfile)
next(contents)
count = 1
# for each line (date, position and depth) find the corresponding depth from the first file and calculate the
# difference
for line in contents:
date, lat, long, depth = line
lat_long = (float(lat), float(long))
# if the depth is empty, then continue
if depth == '':
continue
depth = float(depth)
other_file_depth = position_depths.get(lat_long, None)
# calculate the differences between depths. If a depth is not found, the difference is left blank
if other_file_depth is not None:
depth_difference = depth - other_file_depth
else:
depth_difference = None
yield [date, lat_long[0], lat_long[1], depth, other_file_depth, depth_difference]
def get_depth_differences(depth1_file, depth2_file):
"""Get the differences in depth between two files containing this data"""
position_depths = file_to_position_depths(depth1_file)
differences = yield_difference(depth2_file, position_depths)
return differences
def write_csv_large_differences(differences, csv_outfile):
"""Where the depth difference is greater than 100 m, write out the position, depths and difference into a csv file"""
header = ['Date', 'Lat', 'Long', 'Depth1', 'Depth2', 'Depth difference']
csv_writer = csv.writer(csv_outfile)
csv_writer.writerow(header)
for difference in differences:
if difference[5] is not None:
if abs(difference[5]) > 100:
csv_writer.writerow(difference)
def write_csv_depth_differences(differences, csv_outfile):
"""Write out the depth differences to a csv file"""
header = ['Date', 'Lat', 'Long', 'Depth1', 'Depth2', 'Depth difference']
csv_writer = csv.writer(csv_outfile)
csv_writer.writerow(header)
for difference in differences:
csv_writer.writerow(difference)
def calculate_differences_and_write_out(depth1_file, depth2_file, differences_csvfile, large_differences_csvfile):
"""Calculate the depth differences and output two files: one with all differences and one with differences larger than 100 m"""
differences = get_depth_differences(depth1_file, depth2_file)
differences = list(differences)
with open(differences_csvfile, 'w') as differencescsvfile:
write_csv_depth_differences(differences, differencescsvfile)
with open(large_differences_csvfile, 'w') as largecsvfile:
write_csv_large_differences(differences, largecsvfile)
def create_dataframe(depth1_file, depth2_file):
"""Create a pandas dataframe of the depth differences. Used for debugging"""
position_depths = file_to_position_depths(depth1_file)
differences = yield_difference(depth2_file, position_depths)
df = | pandas.DataFrame(differences, columns=['Date', 'Lat', 'Long', 'Depth1', 'Depth2', 'Depth difference']) | pandas.DataFrame |
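# --- Illustrative sketch (editor addition): wiring the helpers above together;
# the four CSV file names are hypothetical placeholders.
if __name__ == '__main__':
    calculate_differences_and_write_out(
        'depths_source1.csv', 'depths_source2.csv',
        'all_depth_differences.csv', 'large_depth_differences.csv')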
import requests
import sys
import numpy as np
import matplotlib.pyplot as plt
from bs4 import BeautifulSoup
from math import floor, ceil
import pickle
import pandas
import seaborn as sns
def find_id(search):
page = requests.get(f'http://www.imdb.com/search/title?title={search}&title_type=tv_series')
soup = BeautifulSoup(page.content, 'html.parser')
try:
h3 = soup.find('h3', class_='lister-item-header')
a = h3.find('a')
id = a['href'].split('/')[2]
title = a.text
return id, title
except:
return None
def search_input(x):
    id, title = None, None
    while id is None:
try:
#search = '%20'.join(input('Enter show name: ').split())
search = '%20'.join(x.split())
id, title = find_id(search)
except:
break
return id, title
def wrong_season(season, soup):
real_season = int(soup.find('h3', id='episode_top').text.split()[-1])
return season > real_season
def episode_rating(episode):
div = episode.find('div', class_='ipl-rating-star')
if div is None:
return -1
span = div.find('span', class_='ipl-rating-star__rating')
rating = float(span.text)
return rating
def season_ratings(id, season):
page = requests.get(f'http://www.imdb.com/title/{id}/episodes?season={season}')
soup = BeautifulSoup(page.content, 'html.parser')
if wrong_season(season, soup):
return None
ratings = []
for episode in soup.find_all('div', class_='list_item'):
rating = episode_rating(episode)
if rating > 0:
ratings.append(rating)
else:
break
return ratings
def show_ratings(id):
seasons = []
for season in range(1, 1000):
ratings = season_ratings(id, season)
if ratings == None:
break
if len(ratings) > 0:
print(f'Season {season} completed')
seasons.append(ratings)
return seasons
def plot(seasons, title):
x = 1
for i, season in enumerate(seasons):
color = f'C{i}'
newx = x + len(season)
xx = range(x, newx)
plt.plot(xx, season, f'{color}o')
z = np.polyfit(xx, season, 1)
p = np.poly1d(z)
plt.plot(xx, p(xx), color)
x = newx
flat_seasons = [item for sublist in seasons for item in sublist]
miny = max(0, floor(min(flat_seasons)))
maxy = min(10, ceil(max(flat_seasons)))
xx = range(1, x)
z = np.polyfit(xx, flat_seasons, 1)
p = np.poly1d(z)
plt.plot(xx, p(xx), '0.7')
plt.axis([0, x, miny, maxy])
plt.title(title)
plt.show()
def lectureFichier(nomFichier):
with open(nomFichier,'rb') as fichier :
recupere = pickle.Unpickler(fichier)
return recupere.load()
if __name__ == '__main__':
search = lectureFichier("./serie_1_10")
print(search[1])
table = search[1]
print(table)
shows=[]
titles=[]
for i in range(len(table)):
if i!=3:
id, title = search_input(table[i])
seasons = show_ratings(id)
print(seasons)
shows.append(seasons)
titles.append(title)
dict = {'titles': titles, 'scores': shows}
df = | pandas.DataFrame(dict) | pandas.DataFrame |
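    # --- Illustrative sketch (editor addition): one plausible continuation --
    # plot each scraped show with the helper defined above.
    for show_seasons, show_title in zip(shows, titles):
        plot(show_seasons, show_title)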
# Author: <NAME>
# analysis.py: script that analyses the iris dataset. This was my final project for the Programming and Scripting module of the Higher Diploma in Science in Computer Science (Data Analytics).
# import libraries to use further
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#Open Iris dataset using panas library (this is considering iris dataset is saved in the same workspace than the script)
df = | pd.read_csv("iris.csv") | pandas.read_csv |
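# --- Illustrative sketch (editor addition): the usual first look at the frame
# loaded above; 'species' is the conventional iris column name and may differ
# in this particular CSV.
print(df.describe())
print(df.groupby('species').mean())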
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 15 12:51:16 2019
@author: Marc-Kevin
"""
import pandas as pd
from collections import Counter
from sklearn.preprocessing import StandardScaler
def fw_ratings(team):
url = 'http://www.puckon.net/?_ga=2.210512981.1165101973.1547008033-2109006389.1546797137'
raw = pd.read_html(url,header=1)[0]
raw_cut = raw[['Team','GP','SA.1','ESVA.1']]
df = raw_cut[['SA.1','ESVA.1']]
get_names = df.columns
scaler = StandardScaler()
scaled_df = scaler.fit_transform(df)
scaled_df = pd.DataFrame(scaled_df,columns=get_names)
new = pd.concat([scaled_df,raw_cut[['Team']]],axis=1)
sa = pd.pivot_table(new,values = 'SA.1',columns = 'Team')
esva = pd.pivot_table(new,values = 'ESVA.1',columns = 'Team')
rating_lst = []
if team in sa.columns:
sa_1 = sa.loc['SA.1',f'{team}']
esva_1 = esva.loc['ESVA.1',f'{team}']
rating = -1.7051343491288098e-15 -0.19222278*sa_1 + 0.28562659*esva_1
return rating
elif team == 'league':
for t in sa.columns:
sa_1 = sa.loc['SA.1',f'{t}']
esva_1 = esva.loc['ESVA.1',f'{t}']
rating = -1.7051343491288098e-15 -0.19222278*sa_1 + 0.28562659*esva_1
rating_lst.append(rating)
keys = sa.columns
values = rating_lst
r_dict = dict(zip(keys,values))
rating_df = pd.DataFrame(r_dict,index=[0])
rating_df = | pd.melt(rating_df,var_name='Tm',value_name='Fw_rating') | pandas.melt |
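# --- Illustrative sketch (editor addition): a typical call to the helper above;
# it scrapes puckon.net live, and team labels must match the site's 'Team'
# column, so any specific name would be an assumption.
if __name__ == '__main__':
    print(fw_ratings('league'))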
"""
Copyright 2018 <NAME>.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import datetime
import pandas as pd
import pytest
from pandas.testing import assert_series_equal
from testfixtures import Replacer
from testfixtures.mock import Mock
from gs_quant.timeseries import EdrDataReference
from gs_quant.timeseries.backtesting import Basket, basket_series, MqValueError, MqTypeError, RebalFreq, date, \
DataContext, np
def test_basket_series():
dates = [
datetime.datetime(2019, 1, 1),
datetime.datetime(2019, 1, 2),
datetime.datetime(2019, 1, 3),
datetime.datetime(2019, 1, 4),
datetime.datetime(2019, 1, 5),
datetime.datetime(2019, 1, 6),
]
x = pd.Series([100.0, 101, 103.02, 100.9596, 100.9596, 102.978792], index=dates)
y = pd.Series([100.0, 100, 100, 100, 100, 100], index=dates)
assert_series_equal(x, basket_series([x], [1]))
assert_series_equal(x, basket_series([x, x], [0.5, 0.5]))
assert_series_equal(x, basket_series([x, x, x], [1 / 3, 1 / 3, 1 / 3]))
assert_series_equal(x, basket_series([x, y], [1, 0]))
assert_series_equal(y, basket_series([x, y], [0, 1]))
with pytest.raises(MqValueError):
basket_series([x, y], [1])
with pytest.raises(MqTypeError):
basket_series([1, 2, 3], [1])
dates = [
datetime.datetime(2019, 1, 1),
datetime.datetime(2019, 1, 2),
datetime.datetime(2019, 1, 3),
datetime.datetime(2019, 1, 4),
datetime.datetime(2019, 1, 5),
datetime.datetime(2019, 1, 6),
datetime.datetime(2019, 2, 1),
datetime.datetime(2019, 2, 2),
datetime.datetime(2019, 2, 3),
datetime.datetime(2019, 2, 4),
datetime.datetime(2019, 2, 5),
datetime.datetime(2019, 2, 6),
]
mreb = pd.Series(
[100.0, 101, 103.02, 100.9596, 100.9596, 102.978792,
100.0, 101, 103.02, 100.9596, 100.9596, 102.978792],
index=dates)
assert_series_equal(mreb, basket_series([mreb], [1], rebal_freq=RebalFreq.MONTHLY))
def _mock_spot_data():
dates = pd.date_range(start='2021-01-01', periods=6)
x = pd.DataFrame({'spot': [100.0, 101, 103.02, 100.9596, 100.9596, 102.978792]}, index=dates)
x['assetId'] = 'MA4B66MW5E27U9VBB94'
y = pd.DataFrame({'spot': [100.0, 100, 100, 100, 100, 100]}, index=dates)
y['assetId'] = 'MA4B66MW5E27UAL9SUX'
return x.append(y)
def _mock_spot_data_feb():
dates_feb = pd.date_range(start='2021-02-01', periods=6)
x = pd.DataFrame({'spot': [100.0, 101.5, 106.02, 100.1, 105.3, 102.9]}, index=dates_feb)
x['assetId'] = 'MA4B66MW5E27U9VBB94'
y = pd.DataFrame({'spot': [100.0, 101.5, 100.02, 98.1, 95.3, 93.9]}, index=dates_feb)
y['assetId'] = 'MA4B66MW5E27UAL9SUX'
return x.append(y)
def test_basket_price():
with pytest.raises(MqValueError):
Basket(['AAPL UW'], [0.1, 0.9], RebalFreq.MONTHLY)
dates = pd.DatetimeIndex([date(2021, 1, 1), date(2021, 1, 2), date(2021, 1, 3), date(2021, 1, 4), date(2021, 1, 5),
date(2021, 1, 6)])
dates_feb = pd.DatetimeIndex([date(2021, 2, 1), date(2021, 2, 2), date(2021, 2, 3), date(2021, 2, 4),
date(2021, 2, 5), date(2021, 2, 6)])
replace = Replacer()
mock_data = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
mock_data.side_effect = [_mock_spot_data(), _mock_spot_data_feb()]
mock_asset = replace('gs_quant.timeseries.backtesting.GsAssetApi.get_many_assets_data', Mock())
mock_asset.return_value = [{'id': 'MA4B66MW5E27U9VBB94', 'bbid': 'AAPL UW'},
{'id': 'MA4B66MW5E27UAL9SUX', 'bbid': 'MSFT UW'}]
a_basket = Basket(['AAPL UW', 'MSFT UW'], [0.1, 0.9], RebalFreq.MONTHLY)
expected = pd.Series([100.0, 100.1, 100.302, 100.09596, 100.09596, 100.297879], index=dates)
with DataContext('2021-01-01', '2021-01-06'):
actual = a_basket.price()
assert_series_equal(actual, expected)
expected = pd.Series([100.00, 101.50, 100.62, 98.30, 96.30, 94.80], index=dates_feb)
with DataContext('2021-02-01', '2021-02-06'):
actual = a_basket.price()
assert_series_equal(actual, expected)
mock_asset = replace('gs_quant.timeseries.backtesting.GsAssetApi.get_many_assets_data', Mock())
mock_asset.return_value = [{'id': 'MA4B66MW5E27U9VBB94', 'bbid': 'AAPL UW'}]
with pytest.raises(MqValueError):
Basket(['AAPL UW', 'ABC'], [0.1, 0.9], RebalFreq.MONTHLY).price()
with pytest.raises(NotImplementedError):
a_basket.price(real_time=True)
replace.restore()
def test_basket_average_implied_vol():
replace = Replacer()
dates = pd.DatetimeIndex([date(2021, 1, 1), date(2021, 1, 2), date(2021, 1, 3), date(2021, 1, 4), date(2021, 1, 5),
date(2021, 1, 6)])
x = pd.DataFrame({'impliedVolatility': [30.0, 30.2, 29.8, 30.6, 30.1, 30.0]}, index=dates)
x['assetId'] = 'MA4B66MW5E27U9VBB94'
y = pd.DataFrame({'impliedVolatility': [20.0, 20.2, 20.3, 20.6, 21.1, 20.0]}, index=dates)
y['assetId'] = 'MA4B66MW5E27UAL9SUX'
implied_vol = x.append(y)
implied_vol.index.name = 'date'
mock_data = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
mock_data.side_effect = [implied_vol, _mock_spot_data()]
mock_asset = replace('gs_quant.timeseries.backtesting.GsAssetApi.get_many_assets_data', Mock())
mock_asset.return_value = [{'id': 'MA4B66MW5E27U9VBB94', 'bbid': 'AAPL UW'},
{'id': 'MA4B66MW5E27UAL9SUX', 'bbid': 'MSFT UW'}]
a_basket = Basket(['AAPL UW', 'MSFT UW'], [0.1, 0.9], RebalFreq.DAILY)
expected = pd.Series([21.0, 21.2, 21.25, 21.6, 22.0, 21.0], index=dates)
actual = a_basket.average_implied_volatility('6m', EdrDataReference.DELTA_CALL, 50)
assert_series_equal(actual, expected)
with pytest.raises(NotImplementedError):
a_basket.average_implied_volatility('6m', EdrDataReference.DELTA_CALL, 50, real_time=True)
replace.restore()
def test_basket_average_realized_vol():
replace = Replacer()
dates = pd.DatetimeIndex([date(2021, 1, 1), date(2021, 1, 2), date(2021, 1, 3), date(2021, 1, 4), date(2021, 1, 5),
date(2021, 1, 6)])
dates_feb = pd.DatetimeIndex([date(2021, 2, 1), date(2021, 2, 2), date(2021, 2, 3), date(2021, 2, 4),
date(2021, 2, 5), date(2021, 2, 6)])
mock_data = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
mock_data.side_effect = [_mock_spot_data(), _mock_spot_data_feb(), _mock_spot_data_feb()]
mock_asset = replace('gs_quant.timeseries.backtesting.GsAssetApi.get_many_assets_data', Mock())
mock_asset.return_value = [{'id': 'MA4B66MW5E27U9VBB94', 'bbid': 'AAPL UW'},
{'id': 'MA4B66MW5E27UAL9SUX', 'bbid': 'MSFT UW'}]
a_basket = Basket(['AAPL UW', 'MSFT UW'], [0.1, 0.9], RebalFreq.DAILY)
expected = pd.Series([1.1225, 4.49, 2.245, 2.245], index=dates[2:])
with DataContext('2021-01-01', '2021-01-06'):
actual = a_basket.average_realized_volatility('2d')
assert_series_equal(actual, expected)
expected = pd.Series([3.304542, 3.174902, 3.174902], index=dates[3:])
with DataContext('2021-01-01', '2021-01-06'):
actual = a_basket.average_realized_volatility('3d')
assert_series_equal(actual, expected)
mock_data.assert_called_once()
expected = pd.Series([34.698082, 19.719302, 18.860533], index=dates_feb[3:])
with DataContext('2021-02-01', '2021-02-06'):
actual = a_basket.average_realized_volatility('3d')
assert_series_equal(actual, expected)
with pytest.raises(NotImplementedError):
a_basket.average_realized_volatility('2d', real_time=True)
mock_get_last = replace('gs_quant.timeseries.measures.get_last_for_measure', Mock())
mock_get_last.return_value = None
# Test case where ts.get_last_for_measure returns none
with DataContext('2021-02-01', datetime.date.today() + datetime.timedelta(days=2)):
a_basket.average_realized_volatility('2d')
replace.restore()
def _mock_vol_simple():
return pd.Series([1 for i in range(5)], index=pd.date_range('2021-09-01', '2021-09-05'))
def _mock_data_simple():
a = pd.Series([1 for i in range(5)], index=pd.date_range('2021-09-01', '2021-09-05'))
x = pd.DataFrame({'spot': a.tolist()}, index=a.index)
x['assetId'] = 'XLC_MOCK_MQID'
y = pd.DataFrame({'spot': a.tolist()}, index=a.index)
y['assetId'] = 'XLB_MOCK_MQID'
z = pd.DataFrame({'spot': (a ** 3).tolist()}, index=a.index)
z['assetId'] = 'SPX_MOCK_MQID'
return x.append(y).append(z)
def _mock_spot_data_identical():
dates = pd.date_range(start='2021-01-01', periods=6)
x = pd.DataFrame({'spot': [100.0, 101, 103.02, 100.9596, 100.9596, 102.978792]}, index=dates)
x['assetId'] = 'MA4B66MW5E27U9VBB94'
y = pd.DataFrame({'spot': [100.0, 101, 103.02, 100.9596, 100.9596, 102.978792]}, index=dates)
y['assetId'] = 'MA4B66MW5E27UAL9SUX'
return x.append(y)
def _mock_spot_data_corr():
dates = | pd.date_range(start='2021-01-01', periods=6) | pandas.date_range |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from seir.sampling.model import SamplingNInfectiousModel
import logging
logging.basicConfig(level=logging.INFO)
if __name__ == '__main__':
logging.info('Loading data')
# read calibration data
actual_hospitalisations = pd.read_excel('data/calibration.xlsx', sheet_name='Hospitalisations')
actual_hospitalisations['Date'] = [pd.to_datetime(x, ).date() for x in actual_hospitalisations['Date']]
# TODO: should check if file is downloaded: if not, download, then use the downloaded file
actual_infections = | pd.read_csv(
'https://raw.githubusercontent.com/dsfsi/covid19za/master/data/covid19za_provincial_cumulative_timeline_confirmed.csv') | pandas.read_csv |