prompt (stringlengths 19-1.03M) | completion (stringlengths 4-2.12k) | api (stringlengths 8-90) |
---|---|---|
import pandas as pd
from transform import Transform
from shapely.geometry import Point,LineString
import glob
import os.path
import json
import argparse
import logging
from scipy.stats import truncnorm
import matplotlib.pyplot as plt
lower, upper = 0, 7500
mu, sigma = 150, 150
X = truncnorm(
(lower - mu) / sigma, (upper - mu) / sigma, loc=mu, scale=sigma)
logger = logging.getLogger('em.aggregate')
parser = argparse.ArgumentParser(description='Aggregate exported data')
parser.add_argument('--prefix',
help="File prefix",
default="data")
parser.add_argument('--filter',
help="File prefix",
action="store_true",
default=False)
parser.add_argument(
'-d', '--debug',
help="Print lots of debugging statements",
action="store_const", dest="loglevel", const=logging.DEBUG,
default=logging.WARNING,
)
parser.add_argument(
'-v', '--verbose',
help="Be verbose",
action="store_const", dest="loglevel", const=logging.INFO,
)
parser.add_argument(
'-t', '--team',
help="Team for ao system",
default="ao-ercot-tx-system"
)
parser.add_argument('--store',
nargs='?',
default="store.h5",
help="Path for local hdf storage")
args = parser.parse_args()
logging.basicConfig(level=args.loglevel)
store = pd.HDFStore(args.store)
items=['bus','branch','gen','transformer']
data={
"bus":{},
"branch":{},
"gen":{},
"transformer":{}
}
bus_template='templates/substation.yaml'
bus_transform=Transform(bus_template)
bus_outfile='bus-'
bus_group_template='templates/substation-group.yaml'
bus_group_transform=Transform(bus_group_template)
bus_group_outfile='group-bus-'
line_template='templates/transmission.yaml'
line_transform=Transform(line_template)
line_outfile='line-'
line_group_template='templates/transmission-group.yaml'
line_group_transform=Transform(line_group_template)
line_group_outfile='group-line-'
data_bus={}
bus_voltage={}
bus_name={}
bus_geo={}
df_bus_list=[]
df_line_list=[]
team=args.team
def get_bus_key(num):
return 'mmwg_bus_'+str(num)
def get_branch_key(branch_id):
return "mmwg_"+branch_id
def get_rn(item_id):
return "team/{team}/asset/{item_id}".format(team=team,item_id=item_id)
def get_branch_rns(edges):
return ";".join([ get_rn(get_branch_key(edge)) for edge in edges.split(",") ])
def get_capacity(row):
return X.rvs(1)[0]
for i in glob.glob(args.prefix+"*.raw"):
name=os.path.splitext(os.path.basename(i))[0]
print(name)
dfs={}
for item in items:
dfs[item]=store.get('{}_{}'.format(name,item))
print('{}: {}'.format(item,len(dfs[item])))
dbus=dfs['bus']
dbranch=dfs['branch']
dbus['bus_key']=dbus['bus'].apply(get_bus_key)
dbus['rn']=dbus['bus_key'].apply(get_rn)
dbranch['id']=dbranch.index.map(get_branch_key)
dbranch['rn']=dbranch['id'].apply(get_rn)
dbranch['bus0_key']=dbranch['bus0'].apply(get_bus_key)
dbranch['bus1_key']=dbranch['bus1'].apply(get_bus_key)
if args.filter:
print('Bus before: ',dbus.shape)
print('Branch before:',dbranch.shape)
#filtered_bus=dbus.loc[(dbus['bus']>=30000)&(dbus['bus']<=40000)].copy()
filtered_bus=dbus.copy()
print(dbus.edges.head())
edges=set(filtered_bus['edges'].str.cat(sep=',').split(','))
dbranch=dbranch.loc[dbranch.index.isin(edges)].copy()
buses=list(dbus.bus)
dbranch=dbranch.loc[(dbranch.bus0.isin(buses))&dbranch.bus1.isin(buses)]
print('Bus Middle:',filtered_bus.shape)
#all_bus=set(dbranch['bus0_key']).union(set(dbranch['bus1_key']))
#dbus=dbus.loc[dbus['bus_key'].isin(all_bus)].copy()
print('Bus after: ',dbus.shape)
print('Branch after:',dbranch.shape)
dbus['branch_rns']=dbus.edges.apply(get_branch_rns)
dbus['available_capacity']=dbus['bus'].apply(get_capacity)
dbus.loc[dbus['v_nom']<100,'available_capacity']/=100
dbus.loc[dbus['v_nom']<200,'available_capacity']/=10
dbus.loc[dbus['v_nom']>200,'available_capacity']*=1.5
dbus.loc[dbus['v_nom']>300,'available_capacity']*=2
dbus.loc[dbus['v_nom']>400,'available_capacity']*=1.5
# dbus.plot()
# dbus['available_capacity'].hist()
#plt.show()
print('DBUS',dbus.head(),dbus.columns)
## Buses
if 'lat' in dbus and 'long' in dbus:
dbus['bus_name']=dbus['ev_name']+' '+dbus['voltage'].astype(str)+'kV'
dbus.loc[dbus['bus_name'].isnull(),'bus_name']=dbus['psse_name'].str.replace("'",'').str.replace(" ",'') + ' '+dbus['voltage'].astype(str)+'kV'
print(dbus['bus_name'])
dbus['geometry'] = list(zip(dbus.long, dbus.lat))
dbus['geometry'] = dbus['geometry'].apply(Point)
dbus['psse_name']=dbus['psse_name'].str.replace("'",'')
dbus['zone_name']=dbus['zone_name'].str.replace("'",'')
dbus['area_name']=dbus['area_name'].str.replace("'",'')
dbus['owner_name']=dbus['owner_name'].str.replace("'",'')
#print('bus',dbus)
print('columns',dbus.columns)
bus_group_transform.to_file(dbus.itertuples(),bus_group_outfile+name.lower()+'.json')
total_load=dbus['p_load'].sum()
total_gen=dbus['p_gen'].sum()
dbus['ldf']=dbus['p_load']/total_load
df_bus_list.append(dbus)
for itemrow in dbus.iterrows():
item=itemrow[1]
bus_key=item['bus_key']
bus_voltage[bus_key]=item['voltage']
bus_name[bus_key]=item['bus_name']
bus_geo[bus_key]=item['geometry']
if bus_key not in data_bus:
data_bus[bus_key]={}
if name not in data_bus[bus_key]:
data_bus[bus_key][name]={}
for df in ['p_load','p_gen','ldf']:
data_bus[bus_key][name][df]=item[df]
# Transmission
print(dbranch)
dbranch['bus0_name']=dbranch['bus0_key'].apply(lambda x: bus_name.get(x,'Unknown'))
dbranch['bus1_name']=dbranch['bus1_key'].apply(lambda x: bus_name.get(x,'Unknown'))
dbranch['voltage']=dbranch['bus0_key'].apply(lambda x: bus_voltage.get(x,130))
dbranch['branch_key']=dbranch['id']
dbranch['geometry']=dbranch.apply(lambda x: LineString([bus_geo[x['bus0_key']],bus_geo[x['bus1_key']]]),axis=1)
line_group_transform.to_file(dbranch.itertuples(),line_group_outfile+name.lower()+'.json')
df_line_list.append(dbranch)
print(dbranch)
print(dbranch.columns)
break
# # Buses
df_bus= | pd.concat(df_bus_list) | pandas.concat |
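# The chained .loc scaling of available_capacity above applies cumulatively to the lower
# voltage bands (a bus with v_nom < 100 is divided by 100 and then by 10 again). A minimal,
# self-contained illustration with a hypothetical frame and the same thresholds:
import pandas as pd

demo = pd.DataFrame({"v_nom": [69, 138, 230, 345, 500], "available_capacity": 100.0})
demo.loc[demo["v_nom"] < 100, "available_capacity"] /= 100
demo.loc[demo["v_nom"] < 200, "available_capacity"] /= 10
demo.loc[demo["v_nom"] > 200, "available_capacity"] *= 1.5
demo.loc[demo["v_nom"] > 300, "available_capacity"] *= 2
demo.loc[demo["v_nom"] > 400, "available_capacity"] *= 1.5
print(demo)  # 69 kV -> 0.1, 138 kV -> 10, 230 kV -> 150, 345 kV -> 300, 500 kV -> 450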
import pandas as pd
import numpy as np
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import OPTICS
from sklearn.manifold import TSNE
from matplotlib import pyplot as plt
import matplotlib.cm as cm
from hurst import compute_Hc
from arch.unitroot import ADF
import itertools
import typing
class ClusteringPairSelection:
performance_features: pd.DataFrame = pd.DataFrame()
explained_variance: pd.Series = pd.Series()
_clusters: pd.Series = pd.Series()
pairs_list: typing.List[typing.Tuple]
cointegrated_pairs_list: typing.List[typing.List]
cointegration_result: pd.DataFrame = pd.DataFrame()
filtered_pairs: pd.DataFrame = pd.DataFrame()
spreads: pd.DataFrame = pd.DataFrame()
def __init__(self, price: pd.DataFrame):
price_no_na = price.dropna(axis=1)
n_dropped = price.shape[1] - price_no_na.shape[1]
print(f"Dropped {n_dropped} columns out of {price.shape[1]}")
self.price = price_no_na
self.log_price = np.log(price_no_na)
self.performance = self.log_price.diff().iloc[1:]
self.normal_performance = StandardScaler().fit_transform(self.performance)
def select_pairs(self):
print("Converting prices to features...")
self.returns_to_features(5)
pd.Series(self.explained_variance).plot(kind='bar', title="Cumulative explained variance")
plt.show()
print("Creating clusters....")
self.create_clusters(3)
self.clusters.plot(kind='bar', title='Clusters, % of Allocated samples')
plt.show()
self.plot_clusters()
print("Running cointegration check....")
self.check_cointegration()
print("Estimating selection criteria...")
self._calculate_hurst_exponent()
self._calculate_half_life()
print("Applying filters...")
self._apply_post_cointegration_filters()
def returns_to_features(self, n_components):
pca = PCA(n_components=n_components)
transposed_returns = self.normal_performance.T
pca.fit(transposed_returns)
reduced_returns = pd.DataFrame(transposed_returns.dot(pca.components_.T), index=self.performance.columns)
self.explained_variance = pca.explained_variance_ratio_.cumsum()
self.performance_features = reduced_returns
def create_clusters(self, min_samples):
optics = OPTICS(min_samples=min_samples)
clustering = optics.fit(self.performance_features)
len(clustering.labels_[clustering.labels_ == -1]) / len(clustering.labels_)
classified = pd.Series(clustering.labels_, index=self.performance.columns)
self._clusters = classified
self._create_cluster_based_pairs()
@property
def clusters(self):
clusters = pd.Series(self._clusters.index.values, index=self._clusters)
clusters = clusters.groupby(level=0).count()
clusters /= clusters.sum()
return clusters
@staticmethod
def _npr(n, r=2):
return np.math.factorial(n) / np.math.factorial(n - r)
def _create_cluster_based_pairs(self):
classified = self._clusters[self._clusters != -1]
all_pairs = []
for group_id in classified.sort_values().unique():
group = classified[classified == group_id].index.tolist()
combinations = list(itertools.permutations(group, 2))
all_pairs.extend(combinations)
self.pairs_list = all_pairs
def check_cointegration(self):
results = []
pairs_series = {}
total_pairs_length = len(self.pairs_list)
for i, pair in enumerate(self.pairs_list):
x, y = self.log_price.loc[:, pair].values.T
pair_name = "|".join(pair)
pair_id = "|".join(sorted(pair))
residuals = self._get_residuals(x, y)
adf_test = ADF(residuals, lags=1)
p_value = adf_test.pvalue
test_stat = adf_test.stat
results.append({"id": pair_id, "p_value": p_value, "stat": test_stat, "pair": pair_name})
pairs_series[pair_name] = residuals
current = (i + 1)
print(f"{current}/{total_pairs_length} ({current / total_pairs_length:.2%})", end="\r", flush=True)
pairs_series = pd.DataFrame(pairs_series, index=self.price.index)
results = pd.DataFrame(results).set_index("id")
results = results.sort_values("p_value", ascending=False).groupby(level=0).first()
self.cointegration_result = results.set_index("pair")
valid_pairs = [s.split("|") for s in results.index]
self.cointegrated_pairs_list = valid_pairs
self.spreads = pairs_series
@staticmethod
def _regress(y, exogenous):
A = exogenous
A = np.vstack([np.ones(len(A)), A]).T
output = np.linalg.inv(A.T @ A) @ A.T @ y
return output
@classmethod
def _get_residuals(cls, x, y):
intercept, slope = cls._regress(y, x)
residuals = y - (slope * x + intercept)
return residuals
@classmethod
def _get_half_life(cls, spread):
change = spread.diff()
lag = spread.shift().fillna(0)
intercept, slope = cls._regress(change.iloc[1:], lag.iloc[1:])
half_life = -np.log(2) / slope
return half_life
def _calculate_hurst_exponent(self):
hurst_values = {}
for name, row in self.cointegration_result.iterrows():
pair_ts = self.spreads[name].values
H, _, _ = compute_Hc(pair_ts)
hurst_values[name] = H
hurst_values = pd.Series(hurst_values).rename("hurst")
self.cointegration_result = self.cointegration_result.join(hurst_values)
def _calculate_half_life(self):
half_lives = {}
for name, row in self.cointegration_result.iterrows():
pair_ts = self.spreads[name]
half_lives[name] = self._get_half_life(pair_ts)
half_lives = pd.Series(half_lives).rename("half_life")
self.cointegration_result = self.cointegration_result.join(half_lives)
def _apply_post_cointegration_filters(self):
self.filtered_pairs = self.cointegration_result[
(self.cointegration_result.p_value < 0.05) &
(self.cointegration_result.hurst < 0.5) &
(self.cointegration_result.half_life > 3) &
(self.cointegration_result.half_life < 180)
]
def plot_clusters(self, labeled=False):
tsne = TSNE(learning_rate=1000, perplexity=25).fit_transform(self.performance_features)
clusters = pd.concat([
self._clusters.rename("cluster"),
| pd.DataFrame(tsne, columns=["x", "y"], index=self._clusters.index) | pandas.DataFrame |
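# Standalone sketch of the half-life estimate used by _get_half_life above, on a synthetic
# AR(1) spread (names and data here are illustrative, not from the class itself): regress
# the spread change on the lagged spread and convert the slope into a half-life.
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
phi = 0.9                                   # true AR(1) coefficient of the toy spread
spread = pd.Series(0.0, index=range(500))
for t in range(1, 500):
    spread.iloc[t] = phi * spread.iloc[t - 1] + rng.normal()
change = spread.diff().iloc[1:]
lag = spread.shift().iloc[1:]
A = np.vstack([np.ones(len(lag)), lag]).T   # same normal-equations setup as _regress
intercept, slope = np.linalg.inv(A.T @ A) @ A.T @ change.values
half_life = -np.log(2) / slope              # ~ log(2) / (1 - phi) ≈ 6.9 periods
print(round(half_life, 1))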
from functools import reduce
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import os
import yaml
from matplotlib import cm
import pymongo
import matplotlib
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib.lines import Line2D
from common import _preprocess, setup_plt, query_data, get_colorful_styles, output_dir, COLOR_GRAY, yaxis_formatter, get_markers
from operator import itemgetter
def load_data(model):
if model == 'FEMNIST':
query = {
"meta.description": "FEMNIST increase attackers, pgd and blackbox",
# 'hyperparameters.args.client.malicious.attack_stop': { '$ne': 0 }
}
elif model == 'CIFAR10':
query = {
"meta.description": "CIFAR10 increase attackers and blackbox, but temp",
"metrics.result": {'$exists': False}
}
docs_list = query_data(query)
assert len(docs_list) > 0, "Database query is empty!"
metrics = [ | pd.DataFrame.from_dict(doc) | pandas.DataFrame.from_dict |
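# Sketch (with hypothetical documents) of what the truncated list comprehension above
# builds: one DataFrame per queried document, typically concatenated afterwards.
import pandas as pd

docs_list = [{"round": [1, 2], "accuracy": [0.61, 0.74]},
             {"round": [1, 2], "accuracy": [0.58, 0.70]}]
metrics = [pd.DataFrame.from_dict(doc) for doc in docs_list]
print(pd.concat(metrics, keys=range(len(metrics))))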
import numpy as np
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.calibration import CalibratedClassifierCV
import pandas as pd
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import accuracy_score
from sklearn.svm import LinearSVC
from sklearn.multiclass import OneVsRestClassifier
def eval_measures_multi_label(prob_predicted, y_train, threshold=0.08, level='chapter'):
labels = list(set(y_train))
dict_labels = {i: labels[i] for i in range(0, len(labels))}
predicted_labels = []
# Choose the threshold so that the label cardinality of the test set is of the
# same order as the label cardinality of the training set
for i in range(prob_predicted.shape[0]):
lst = [np.argwhere(prob_predicted[i] > threshold)]
lst_val = np.vectorize(dict_labels.get)(lst)[0]
predicted_labels.append(lst_val)
# A dataset containing patient id + corresponding predicted chapter labels
predicted_set = list(zip(patient_id_test, predicted_labels))
# new X_test for multi_label removing duplicates
multi_label_patient_id = df_multi_label['ID']
true_labels = []
for j in range(df_multi_label.shape[0]):
lst = df_multi_label[level][j].split(",")
true_labels.append(lst)
# A dataset containing patient id + corresponding true chapter labels
true_set = list(zip(multi_label_patient_id, true_labels))
sum_a = 0
sum_p = 0
sum_r = 0
sum_f = 0
n = 0
for i in range(len(predicted_set)):
# predicted_set[i][0] is the patient id
pid = predicted_set[i][0]
for j in range(len(true_set)):
if true_set[j][0] == pid:
YZ_intersection = len(np.intersect1d(predicted_set[i][1], true_set[j][1]))
YZ_union = len(np.union1d(predicted_set[i][1], true_set[j][1]))
Yi = len(predicted_set[i][1])
Zi = len(true_set[j][1])
break
n = n + 1
sum_a += YZ_intersection / YZ_union
sum_p += YZ_intersection / Zi
sum_r += YZ_intersection / Yi
sum_f += (2 * YZ_intersection) / (Yi + Zi)
acc = sum_a / n
pre = sum_p / n
rec = sum_r / n
f1 = sum_f / n
return acc, pre, rec, f1
data = | pd.read_csv("Data/icd_selected_data_single_with codes.csv") | pandas.read_csv |
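# Tiny worked example of the example-based (per-patient) metrics computed above, with
# hypothetical labels: predicted {A, B} vs. true {A, C} gives |Y ∩ Z| = 1, |Y ∪ Z| = 3 and
# |Y| = |Z| = 2, hence accuracy 1/3, precision 1/2, recall 1/2 and F1 = 2*1/(2+2) = 0.5.
import numpy as np

predicted = np.array(["A", "B"])
true = np.array(["A", "C"])
inter = len(np.intersect1d(predicted, true))   # 1
union = len(np.union1d(predicted, true))       # 3
print(inter / union, inter / len(true), inter / len(predicted),
      2 * inter / (len(predicted) + len(true)))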
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import math
from copy import deepcopy
from collections import OrderedDict
import os
import matplotlib.image as mpimg
import pickle
def DATA_ANALYSIS():
train_files = os.listdir('train/')
test_files = os.listdir('test/')
print(f"Train-set Size: {len(train_files)}")
print(f"Test-set Size: {len(test_files)}")
# ======================== Images demonstration ====
images_num = 16
fig, axs = plt.subplots(4, 4)
for i in range(images_num):
image = mpimg.imread(f"train/{train_files[i]}")
axs[int(i/4), i%4].imshow(image)
plt.xlabel(f"{train_files[i]}")
axs[int(i / 4), i % 4].title.set_text(f"{train_files[i]}")
axs[int(i / 4), i % 4].axes.get_xaxis().set_visible(False)
axs[int(i / 4), i % 4].axes.get_yaxis().set_visible(False)
plt.subplots_adjust(left=None, bottom=None, right=None, top=1.5, wspace=None, hspace=None)
plt.show()
# ======================== Data insights =======
#
masked_num = 0
unmasked_num = 0
for image_name in train_files:
if("_1." in image_name):
masked_num +=1
elif("_0." in image_name):
unmasked_num +=1
else:
image = mpimg.imread(f"train/{image_name}")
plt.imshow(image)
plt.show()
print(f"Proper mask wearers in train dataset: {masked_num}")
print(f"Unproper mask wearers in train dataset: {unmasked_num}")
masked_num = 0
unmasked_num = 0
for image_name in test_files:
if("_1." in image_name):
masked_num +=1
elif("_0." in image_name):
unmasked_num +=1
else:
image = mpimg.imread(f"test/{image_name}")
plt.imshow(image)
plt.show()
print(f"Proper mask wearers in test dataset: {masked_num}")
print(f"Unproper mask wearers in test dataset: {unmasked_num}")
# ======================== DATA EXTRACTION ===============
def DF_creation(path_to_folder='train', load_save = True):
"""
:param path_to_folder: 'train' or 'test'
:param load_save: if True load save (if exists), else creates files from scratch
:return:
"""
if(path_to_folder[-1]== '/'):
path_to_folder = path_to_folder[:-1]
images_list = os.listdir(path_to_folder + '/')
df_file = ''
for el in path_to_folder.split('/'):
df_file += el + '_'
df_file += 'df.pkl'
if ((df_file in os.listdir('saves/')) and load_save):
df = pickle.load(open(f'saves/{df_file}', "rb"))
else:
df = | pd.DataFrame() | pandas.DataFrame |
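# A compact alternative (sketch with hypothetical file names) to the manual "_1."/"_0."
# counters above: extract the mask label from each filename into a DataFrame and count.
import pandas as pd

files = ["000123_1.jpg", "000124_0.jpg", "000125_1.jpg"]
labels = pd.DataFrame({"file": files})
labels["label"] = labels["file"].str.extract(r"_([01])\.")[0].astype(int)
print(labels["label"].value_counts())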
# dash libs
import collections
import dash
import pandas as pd
from sqlalchemy import create_engine
# dash interactive states
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
# dash components
import dash_html_components as html
import dash_core_components as dcc
import dash_table
# Plotly figure libraries
import plotly.express as px
# set connection string
user = 'user'
password = 'password'
DATABASE_URI = 'postgres+psycopg2://{}:{}@localhost:5432/dataviz'.format(user,password)
con = create_engine(DATABASE_URI)
#styling
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
dimensions = ["x", "y", "color", "facet_col", "facet_row"]
dgraph = dimensions + ['hover-dropdown']
user_cols = {'':{},
'cycles':{'crop', 'location',
'planting_date', 'nitrogen_rate', 'weed_fraction', 'yield',
'year','unique_id'},
'economic':{}}
#Config elements
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
app.config.suppress_callback_exceptions = True
tables = ['cycles']
# Layout
app.layout = html.Div([
dcc.Store(id='s-cols'),
dcc.Store(id='s-data'),
html.Div([
html.Div([
html.P(['Datasource: ']),
dcc.Dropdown(id='dd-table',
options=[dict(label=x, value=x) for x in tables]
),
html.Button('Show Data', id='btn-table'),
html.P(['X Axis: ']),
dcc.Dropdown(id='dd-x',options=[]),
html.P(['Y Axis: ']),
dcc.Dropdown(id='dd-y'),
html.P(['Color: ']),
dcc.Dropdown(id='dd-color'),
html.P(['Facet Column: ']),
dcc.Dropdown(id='dd-facet_col'),
html.P(['Facet Row: ']),
dcc.Dropdown(id='dd-facet_row'),
html.P(['On Hover show: ']),
html.Div([dcc.Dropdown(id='dd-hover',multi=True)]),
# html.Div(id='collist'),
# dcc.Dropdown(id='dd-cols'),
# html.Div(id='table'),
html.Div(id='test'),
html.Div([
html.Button('Build Graph', id='btn-graph'),
],style={'float':'right'})
],className="four columns"),
html.Div([
dcc.Graph(id='g-scatter')
],className="eight columns")
], className="row"),
html.Div([
html.Div(id='dt-table')
],className="row")
])
# Callbacks
# Query SQL for selected table to generate columns list
@app.callback([Output('s-cols', 'data'),Output('s-data', 'data')],
[Input("dd-table", "value")],
[State('s-cols', 'data'),State('s-data', 'data')]
)
def update_cols(table, cols, data):
if table is None or table == '':
raise PreventUpdate
col_list = list(user_cols[table])
col_list.sort(key=str.lower)
select_cols = ", ".join(list(col_list))
query = 'SELECT {} FROM {}'.format(select_cols,table)
dataf = | pd.read_sql(query,con) | pandas.read_sql |
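# The callback above is truncated right after the SQL read; one common way (a sketch, not
# shown in the original code) to hand a DataFrame to the dcc.Store components is a JSON
# round-trip, serializing once and deserializing in downstream callbacks.
from io import StringIO
import pandas as pd

df = pd.DataFrame({"crop": ["maize", "wheat"], "yield": [9.1, 6.4]})  # stand-in for the SQL result
stored = df.to_json(orient="split")                # value returned into the s-data store
roundtrip = pd.read_json(StringIO(stored), orient="split")
print(roundtrip)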
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 12 12:05:47 2020
@author: Samyak
"""
#==============================================================================
# REGRESSION MODEL - PREDICTING PRICE OF PRE OWNED CARS
#==============================================================================
import numpy as np
import pandas as pd
import seaborn as sns
#setting graph size
sns.set(rc = {"figure.figsize": (10, 8)})
# Reading Data and getting info about data
data_price = pd.read_csv("cars_sampled.csv")
cars = data_price.copy()
cars.info()
cars.describe()
# To set float values upto 3 decimal places
pd.set_option("display.float_format", lambda x: "%.3f" % x)
cars.describe()
# Dropping unwanted columns
cols = ["name", "dateCrawled", "dateCreated", "postalCode", "lastSeen"]
cars = cars.drop(columns = cols, axis =1)
#Removing duplicates from the data
cars.drop_duplicates(keep="first", inplace=True)
cars.isnull().sum()
# variable yearOfRegistration
yearwise = cars["yearOfRegistration"].value_counts().sort_index()
cars["yearOfRegistration"].describe()
sum(cars["yearOfRegistration"] > 2018)
sum(cars["yearOfRegistration"] < 1950)
sns.regplot(x="yearOfRegistration", y="price", scatter=True, fit_reg=False, data=cars)
# Removing Null values
cars = cars.dropna(axis = 0)
cars.isnull().sum()
# variable price
price_count = cars["price"].value_counts().sort_index()
cars["price"].describe()
sum(cars["price"] > 150000)
sum(cars["price"] < 100)
sns.distplot(cars["price"])
# variable powerPS
power_count = cars["powerPS"].value_counts().sort_index()
cars["powerPS"].describe()
sum(cars["powerPS"] > 500)
sum(cars["powerPS"] < 10)
sns.boxplot(cars["powerPS"])
sns.regplot(x="powerPS", y="price", scatter=True, fit_reg=False, data=cars)
#Restricting the data to a sensible range to make it more useful
cars = cars[
(cars.yearOfRegistration >= 1950)
& (cars.yearOfRegistration <= 2018)
& (cars.price <= 150000)
& (cars.price >= 100)
& (cars.powerPS <= 500)
& (cars.powerPS >= 10)
]
cars["monthOfRegistration"] /= 12
#Adding Age
cars["Age"] = (2018-cars["yearOfRegistration"])+cars["monthOfRegistration"]
cars["Age"] = round(cars["Age"], 2)
cars["Age"].describe()
#Since Age is derived from these columns, removing them
cols1 = ["yearOfRegistration", "monthOfRegistration"]
cars = cars.drop(columns = cols1, axis = 1)
cars1 = cars.copy()
#Visualizing parameters after narrowing the range of the dataframe
#Age
sns.distplot(cars["Age"])
sns.boxplot(y=cars["Age"])
sns.regplot(x="Age", y="price", scatter=True, fit_reg=False, data=cars1)
#price
sns.distplot(cars["price"])
sns.boxplot(y=cars["price"])
#poweerPS
sns.distplot(cars["powerPS"])
sns.boxplot(y=cars["powerPS"])
sns.regplot(x="powerPS", y="price", scatter=True, fit_reg=False, data=cars1)
#=============================================================================
#Comparing and analyzing each variable against price
#And removing Insignificant columns
#=============================================================================
#seller
cars["seller"].value_counts()
pd.crosstab(cars["seller"], columns="count", normalize=True)
sns.countplot(x="seller", data=cars1)
sns.boxplot(x="seller", y="price", data=cars1)
#Few cars have a 'commercial' seller, which is insignificant
#does not affect price as seen in boxplot
cars1 = cars1.drop(columns=["seller"], axis=1)
#offerType
cars["offerType"].value_counts()
pd.crosstab(cars["offerType"], columns="count", normalize=True)
sns.countplot(x="offerType", data=cars1)
sns.boxplot(x="offerType", y="price", data=cars1)
#does not affect price as seen in boxplot
cars1 = cars1.drop(columns=["offerType"], axis=1)
#abtest
cars["abtest"].value_counts()
pd.crosstab(cars["abtest"], columns="count", normalize=True)
sns.countplot(x="abtest", data=cars1)
sns.boxplot(x="abtest", y="price", data=cars1)
#does not affect price as seen in boxplot
cars1 = cars1.drop(columns=["abtest"], axis=1)
#vehicleType
cars["vehicleType"].value_counts()
pd.crosstab(cars["vehicleType"], columns="count", normalize=True)
sns.countplot(x="vehicleType", data=cars1)
sns.boxplot(x="vehicleType", y="price", data=cars1)
#affecting the price
#gearbox
cars["gearbox"].value_counts()
pd.crosstab(cars["gearbox"], columns="count", normalize=True)
sns.countplot(x="gearbox", data=cars1)
sns.boxplot(x="gearbox", y="price", data=cars1)
#affecting the price
#model
cars["model"].value_counts()
| pd.crosstab(cars["model"], columns="count", normalize=True) | pandas.crosstab |
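# The per-column blocks above repeat the same value_counts / crosstab / countplot /
# boxplot pattern; a possible consolidation (sketch only, the demo frame below is a
# stand-in for the cars data):
import pandas as pd
import seaborn as sns

def describe_categorical(df, col, target="price"):
    # replicate the repeated inspection block for one categorical column
    print(df[col].value_counts())
    print(pd.crosstab(df[col], columns="count", normalize=True))
    sns.countplot(x=col, data=df)
    sns.boxplot(x=col, y=target, data=df)

demo = pd.DataFrame({"gearbox": ["manual", "automatic", "manual"], "price": [4200, 9100, 3800]})
describe_categorical(demo, "gearbox")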
# Listing date
# Classification
# 1. Industry
# 1.1 Shenwan (SW) industry
# 1.2 Tonghuashun (THS) industry
# 1.3 CSRC industry
# 2. Region
# 3. Concept
import re
import warnings
import akshare as ak
import numpy as np
import pandas as pd
from toolz.dicttoolz import merge
from cnswd.mongodb import get_db
from ..common import AD_FIELD_NAME, SID_FIELD_NAME, TS_FIELD_NAME
from .constants import (CN_TO_SECTOR, SECTOR_NAMES, SUPER_SECTOR_NAMES,
SW_SECTOR_NAMES)
from .utils import get_bcolz_col_names
THOUSAND_PAT = re.compile(",")
NUM_MAPS = {
1: '一级',
2: '二级',
3: '三级',
4: '四级',
}
# region Basic information
def _listing_date():
"""上市日期"""
sh_df = ak.stock_info_sh_name_code(indicator="主板A股")
sh = {code: pd.to_datetime(dt, errors='coerce') for code, dt in zip(
sh_df['SECURITY_CODE_A'], sh_df['LISTING_DATE'])}
sz_df = ak.stock_info_sz_name_code(indicator="A股列表")
sz = {code: pd.to_datetime(dt, errors='coerce') for code, dt in zip(
sz_df['A股代码'], sz_df['A股上市日期'])}
return merge(sh, sz)
def get_ipo():
# listing dates are empty for a large number of stocks
db = get_db('wy')
collection = db['IPO资料']
docs = collection.find(
{},
projection={
'_id': 0,
'股票代码': 1,
'上市日期': 1,
}
)
df = | pd.DataFrame.from_records(docs) | pandas.DataFrame.from_records |
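# Small illustration (hypothetical codes and dates) of what _listing_date above produces:
# two exchange-specific dicts merged with toolz, which can then be wrapped in a Series
# keyed by stock code for later joins.
import pandas as pd
from toolz.dicttoolz import merge

sh = {"600000": pd.to_datetime("1999-11-10")}
sz = {"000001": pd.to_datetime("1991-04-03")}
listing = pd.Series(merge(sh, sz), name="listing_date").sort_index()
print(listing)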
import numpy as np
import pandas as pd
import joblib, os, logging
from joblib import Parallel, delayed
from workalendar.europe import Greece
from scipy.interpolate import interp2d
def rescale(arr, nrows, ncol):
W, H = arr.shape
new_W, new_H = (nrows, ncol)
xrange = lambda x: np.linspace(0, 1, x)
f = interp2d(xrange(H), xrange(W), arr, kind="linear")
new_arr = f(xrange(new_H), xrange(new_W))
return new_arr
def stack_2d(X, sample, compress):
if compress:
sample = rescale(sample, 8, 8)
if len(sample.shape) == 3:
if X.shape[0] == 0:
X = sample
elif len(X.shape) == 3:
X = np.stack((X, sample))
else:
X = np.vstack((X, sample[np.newaxis, :, :, :]))
elif len(sample.shape) == 2:
if X.shape[0] == 0:
X = sample
elif len(X.shape) == 2:
X = np.stack((X, sample))
else:
X = np.vstack((X, sample[np.newaxis, :, :]))
return X
def stack_3d(X, sample):
if X.shape[0] == 0:
X = sample
elif len(sample.shape)!=len(X.shape):
X = np.vstack((X, sample[np.newaxis]))
else:
X = np.vstack((X, sample))
return X
class dataset_creator_LV():
def __init__(self, projects_group, projects, data, path_nwp, nwp_model, nwp_resolution, data_variables, njobs=1, test=False):
self.projects = projects
self.isfortest = test
self.projects_group = projects_group
self.data = data
self.path_nwp = path_nwp
self.create_logger()
self.check_dates()
self.nwp_model = nwp_model
self.nwp_resolution = nwp_resolution
if self.nwp_resolution == 0.05:
self.compress = True
else:
self.compress = False
self.njobs = njobs
self.variables = data_variables
def create_logger(self):
self.logger = logging.getLogger(__name__)
self.logger.setLevel(logging.INFO)
handler = logging.FileHandler(os.path.join(os.path.dirname(self.path_nwp), 'log_' + self.projects_group + '.log'), 'a')
handler.setLevel(logging.INFO)
# create a logging format
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
# add the handlers to the logger
self.logger.addHandler(handler)
def check_dates(self):
start_date = pd.to_datetime(self.data.index[0].strftime('%d%m%y'), format='%d%m%y')
end_date = pd.to_datetime(self.data.index[-1].strftime('%d%m%y'), format='%d%m%y')
dates = pd.date_range(start_date, end_date)
data_dates = pd.to_datetime(np.unique(self.data.index.strftime('%d%m%y')), format='%d%m%y')
dates = [d for d in dates if d in data_dates]
self.logger.info('Dates checked. Number of time samples: %s', str(len(dates)))
self.dates = pd.DatetimeIndex(dates)
def check_empty_nwp(self, nwp, variables):
flag = True
for var in variables:
if nwp[var].shape[0] == 0:
flag = False
break
return flag
def stack_daily_nwps(self, t, data, lats, longs, path_nwp, nwp_model, projects, variables, compress):
X = dict()
fname = os.path.join(path_nwp, nwp_model + '_' + t.strftime('%d%m%y') + '.pickle')
if os.path.exists(fname):
nwps = joblib.load(fname)
pdates = pd.date_range(t + pd.DateOffset(hours=24), t + pd.DateOffset(hours=47), freq='H').strftime(
'%d%m%y%H%M')
for project in projects:
X[project['_id']] = pd.DataFrame()
areas = project['static_data']['areas']
x = pd.DataFrame()
for date in pdates:
try:
nwp = nwps[date]
date = pd.to_datetime(date, format='%d%m%y%H%M')
if self.check_empty_nwp(nwp, variables):
inp = self.create_sample_nwp(date, nwp, lats[project['_id']], longs[project['_id']])
x = pd.concat([x, inp])
except:
continue
if x.shape[0]>0:
X[project['_id']] = x
cols = ['Temp' + '_'+ area for area in lats[project['_id']].keys()]
X[project['_id']]['Temp_max'] = x[cols].mean(axis=1).max()
X[project['_id']]['Temp_min'] = x[cols].mean(axis=1).min()
print(t.strftime('%d%m%y%H%M'), ' extracted')
return (X, t.strftime('%d%m%y%H%M'))
def lats_longs(self):
lats = dict()
longs = dict()
flag = False
for t in self.dates:
fname = os.path.join(self.path_nwp, self.nwp_model + '_' + t.strftime('%d%m%y') + '.pickle')
pdates = pd.date_range(t + | pd.DateOffset(hours=24) | pandas.DateOffset |
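# Sketch of the grid resampling that rescale() performs, written with scipy.ndimage.zoom
# as a possible alternative since interp2d is deprecated in recent SciPy releases
# (illustrative only; the class above keeps the interp2d implementation).
import numpy as np
from scipy.ndimage import zoom

arr = np.arange(25, dtype=float).reshape(5, 5)                        # stand-in NWP field
resampled = zoom(arr, (8 / arr.shape[0], 8 / arr.shape[1]), order=1)  # bilinear, like kind="linear"
print(resampled.shape)                                                # (8, 8)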
# lowbot_Micro Sma v2.6.py
from distutils.command.build_py import build_py
import warnings
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
import pprint
import pandas as pd # needs pip install if not installed
import numpy as np
import matplotlib.pyplot as plt # needs pip install if not installed
import os
import math
import sys
import asyncio
import telegram_send
from multiprocessing import Process
import threading
import importlib
import glob
# Needed for colorful console output Install with: python3 -m pip install colorama (Mac/Linux) or pip install colorama (PC)
from colorama import init
init()
# needed for the binance API / websockets / Exception handling
from binance.client import Client
from binance.exceptions import BinanceAPIException
from requests.exceptions import ReadTimeout, ConnectionError
# used for dates
from datetime import date, datetime, timedelta
import time
# used to repeatedly execute the code
from itertools import count
# used to store trades and sell assets
import json
# Load helper modules
from helpers.parameters import (
parse_args, load_config
)
# Load creds modules
from helpers.handle_creds import (
load_correct_creds, test_api_key
)
# for colourful logging to the console
class txcolors:
BUY = '\033[92m'
WARNING = '\033[93m'
SELL_LOSS = '\033[91m'
SELL_PROFIT = '\033[32m'
DIM = '\033[2m\033[35m'
DEFAULT = '\033[39m'
BLUE = '\033[93m'
# tracks profit/loss each session
global session_profit
session_profit = 0
# print with timestamps
old_out = sys.stdout
class St_ampe_dOut:
"""Stamped stdout."""
nl = True
def write(self, x):
"""Write function overloaded."""
if x == '\n':
old_out.write(x)
self.nl = True
elif self.nl:
old_out.write(f'{txcolors.DIM}[{str(datetime.now().replace(microsecond=0))}]{txcolors.DEFAULT} {x}')
self.nl = False
else:
old_out.write(x)
def flush(self):
pass
sys.stdout = St_ampe_dOut()
def get_price(add_to_historical=True):
'''Return the current price for all coins on binance'''
global historical_prices, hsp_head
initial_price = {}
prices = client.get_all_tickers()
for coin in prices:
if CUSTOM_LIST:
if any(item + PAIR_WITH == coin['symbol'] for item in tickers) and all(item not in coin['symbol'] for item in FIATS):
initial_price[coin['symbol']] = { 'price': coin['price'], 'time': datetime.now()}
else:
if PAIR_WITH in coin['symbol'] and all(item not in coin['symbol'] for item in FIATS):
initial_price[coin['symbol']] = { 'price': coin['price'], 'time': datetime.now()}
if add_to_historical:
hsp_head += 1
if hsp_head == RECHECK_INTERVAL:
hsp_head = 0
historical_prices[hsp_head] = initial_price
return initial_price
def wait_for_price():
'''calls the initial price and ensures the correct amount of time has passed
before reading the current price again'''
global historical_prices, hsp_head, volatility_cooloff
volatile_coins = {}
externals = {}
coins_up = 0
coins_down = 0
coins_unchanged = 0
pause_bot()
if historical_prices[hsp_head]['TRX' + PAIR_WITH]['time'] > datetime.now() - timedelta(seconds=3):
# sleep for exactly the amount of time required
time.sleep(3)
print(f'Working...Session profit:{session_profit:.2f}% ')
# retreive latest prices
get_price()
# calculate the difference in prices
for coin in historical_prices[hsp_head]:
# minimum and maximum prices over time period
min_price = min(historical_prices, key = lambda x: float("inf") if x is None else float(x[coin]['price']))
max_price = max(historical_prices, key = lambda x: -1 if x is None else float(x[coin]['price']))
threshold_check = (-1.0 if min_price[coin]['time'] > max_price[coin]['time'] else 1.0) * (float(max_price[coin]['price']) - float(min_price[coin]['price'])) / float(min_price[coin]['price']) * 100
# each coin with higher gains than our CHANGE_IN_PRICE is added to the volatile_coins dict if less than MAX_COINS is not reached.
if threshold_check < CHANGE_IN_PRICE:
coins_up +=1
if coin not in volatility_cooloff:
volatility_cooloff[coin] = datetime.now() - timedelta(seconds=3)
# only include coin as volatile if it hasn't been picked up in the last TIME_DIFFERENCE minutes already
if datetime.now() >= volatility_cooloff[coin] + timedelta(seconds=3):
volatility_cooloff[coin] = datetime.now()
if len(coins_bought) + len(volatile_coins) < MAX_COINS or MAX_COINS == 0:
volatile_coins[coin] = round(threshold_check, 3)
# print(f'{coin} has gained - {volatile_coins[coin]}% within the last {TIME_DIFFERENCE} minutes')
# else:
# print(f'{txcolors.WARNING}{coin} has gained {round(threshold_check, 3)}% within the last 3 seconds, but you are holding max number of coins{txcolors.DEFAULT}')
elif threshold_check > CHANGE_IN_PRICE:
coins_down +=1
else:
coins_unchanged +=1
# Here goes new code for external signalling
externals = external_signals()
exnumber = 0
for excoin in externals:
if excoin not in volatile_coins and excoin not in coins_bought and \
(len(coins_bought) + exnumber + len(volatile_coins)) < MAX_COINS:
volatile_coins[excoin] = 1
exnumber +=1
print(f'External signal received on {excoin}, calculating volume in {PAIR_WITH}')
return volatile_coins, len(volatile_coins), historical_prices[hsp_head]
def external_signals():
external_list = {}
signals = {}
# check directory and load pairs from files into external_list
signals = glob.glob("signals/*.exs")
for filename in signals:
for line in open(filename):
symbol = line.strip()
external_list[symbol] = symbol
try:
os.remove(filename)
except:
if DEBUG: print(f'{txcolors.WARNING}Could not remove external signalling file{txcolors.DEFAULT}')
return external_list
def pause_bot():
'''Pause the script when external indicators detect a bearish trend in the market'''
global bot_paused, session_profit, hsp_head
# start counting for how long the bot's been paused
start_time = time.perf_counter()
while os.path.isfile("signals/paused.exc"):
if bot_paused == False:
print(f'{txcolors.WARNING}Pausing buying due to change in market conditions, stop loss and take profit will continue to work...{txcolors.DEFAULT}')
bot_paused = True
# Sell function needs to work even while paused
coins_sold = sell_coins()
remove_from_portfolio(coins_sold)
get_price(True)
# pausing here
if hsp_head == 1: print(f'Paused...Session profit:{session_profit:.2f}% Est:${(QUANTITY * session_profit)/100:.2f}')
time.sleep(3)
else:
# stop counting the pause time
stop_time = time.perf_counter()
time_elapsed = timedelta(seconds=int(stop_time-start_time))
# resume the bot and ser pause_bot to False
if bot_paused == True:
print(f'{txcolors.WARNING}Resuming buying due to change in market conditions, total sleep time: {time_elapsed}{txcolors.DEFAULT}')
bot_paused = False
return
def convert_volume():
# converts the volume of coin to buy and finds the correct stepsize
volatile_coins, number_of_coins, last_price = wait_for_price()
lot_size = {}
volume = {}
for coin in volatile_coins:
# Find the correct step size for each coin
# max accuracy for TRX for example is 6 decimal points, while XRP just 1.
try:
info = client.get_symbol_info(coin)
step_size = info['filters'][2]['stepSize']
lot_size[coin] = step_size.index('1') - 1
if lot_size[coin] < 0:
lot_size[coin] = 0
except:
pass
# use a fraction (here 50%) of the free balance
free_balance = client.get_asset_balance(asset='BUSD')
free = math.floor(float(free_balance['free']) * 0.5)
# calculate the volume in coin from QUANTITY in 'PAIRWITH' (default)
volume[coin] = float(free / float(last_price[coin]['price']))
# define the volume with the correct step size
if coin not in lot_size:
volume[coin] = float('{:.1f}'.format(volume[coin]))
else:
# if lot size has 0 decimal points, make the volume an integer
if lot_size[coin] == 0:
volume[coin] = int(volume[coin])
else:
volume[coin] = float('{:.{}f}'.format(volume[coin], lot_size[coin]))
return volume, last_price
def buy():
volume, last_price = convert_volume()
orders = {}
# # valid intervals - 1m, 3m, 5m, 15m, 30m, 1h, 2h, 4h, 6h, 8h, 12h, 1d, 3d, 1w, 1M
symbol = 'TRXBUSD'
starttime = '24 hours ago UTC' # to start for 1 week ago
interval = '15m'
bars = client.get_historical_klines(symbol, interval, starttime)
for line in bars: # Keep only first 5 columns, "date" "open" "high" "low" "close"
del line[5:]
df = | pd.DataFrame(bars, columns=['date', 'open', 'high', 'low', 'close']) | pandas.DataFrame |
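# The prompt stops right after the klines are loaded; a typical next step for an SMA-based
# rule (a sketch with hypothetical prices, not the bot's actual logic) is to cast the close
# column to numeric and take a rolling mean:
import pandas as pd

demo = pd.DataFrame({"close": ["1.00", "1.10", "1.20", "1.15", "1.30"]})
demo["close"] = pd.to_numeric(demo["close"])
demo["sma_3"] = demo["close"].rolling(window=3).mean()
print(demo)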
"""
Copyright 2022 HSBC Global Asset Management (Deutschland) GmbH
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import pandas as pd
import pytest
import pyratings as rtg
from tests import conftest
@pytest.fixture(scope="session")
def rtg_inputs_longterm():
return pd.DataFrame(
data={
"rtg_sp": ["AAA", "AA-", "AA+", "BB-", "C", np.nan, "BBB+", "AA"],
"rtg_moody": ["Aa1", "Aa3", "Aa2", "Ba3", "Ca", np.nan, np.nan, "Aa2"],
"rtg_fitch": ["AA-", np.nan, "AA-", "B+", "C", np.nan, np.nan, "AA"],
}
)
@pytest.fixture(scope="session")
def rtg_inputs_shortterm():
return pd.DataFrame(
data={
"rtg_sp": ["A-1", "A-3", "A-1+", "D", "B", np.nan, "A-2", "A-3"],
"rtg_moody": ["P-2", "NP", "P-1", "NP", "P-3", np.nan, np.nan, "P-3"],
"rtg_fitch": ["F1", np.nan, "F1", "F3", "F3", np.nan, np.nan, "F3"],
}
)
def test_get_best_rating_longterm_with_explicit_rating_provider(rtg_inputs_longterm):
"""Test computation of best ratings on a security (line-by-line) basis."""
actual = rtg.get_best_ratings(
rtg_inputs_longterm,
rating_provider_input=["SP", "Moody", "Fitch"],
tenor="long-term",
)
expectations = pd.Series(
data=["AAA", "AA-", "AA+", "BB-", "CC", np.nan, "BBB+", "AA"], name="best_rtg"
)
pd.testing.assert_series_equal(actual, expectations)
def test_get_best_rating_longterm_with_inferring_rating_provider(rtg_inputs_longterm):
"""Test computation of best ratings on a security (line-by-line) basis."""
actual = rtg.get_best_ratings(rtg_inputs_longterm, tenor="long-term")
expectations = pd.Series(
data=["AAA", "AA-", "AA+", "BB-", "CC", np.nan, "BBB+", "AA"], name="best_rtg"
)
pd.testing.assert_series_equal(actual, expectations)
def test_get_best_rating_shortterm_with_explicit_rating_provider(rtg_inputs_shortterm):
"""Test computation of best ratings on a security (line-by-line) basis."""
actual = rtg.get_best_ratings(
rtg_inputs_shortterm,
rating_provider_input=["SP", "Moody", "Fitch"],
tenor="short-term",
)
expectations = pd.Series(
data=["A-1", "A-3", "A-1+", "A-3", "A-3", np.nan, "A-2", "A-3"], name="best_rtg"
)
pd.testing.assert_series_equal(actual, expectations)
def test_get_best_rating_shortterm_with_inferring_rating_provider(rtg_inputs_shortterm):
"""Test computation of best ratings on a security (line-by-line) basis."""
actual = rtg.get_best_ratings(rtg_inputs_shortterm, tenor="short-term")
expectations = pd.Series(
data=["A-1", "A-3", "A-1+", "A-3", "A-3", np.nan, "A-2", "A-3"], name="best_rtg"
)
pd.testing.assert_series_equal(actual, expectations)
def test_get_second_best_rating_longterm_with_explicit_rating_provider(
rtg_inputs_longterm,
):
"""Test computation of second-best ratings on a security (line-by-line) basis."""
actual = rtg.get_second_best_ratings(
rtg_inputs_longterm,
rating_provider_input=["SP", "Moody", "Fitch"],
tenor="long-term",
)
expectations = pd.Series(
data=["AA+", "AA-", "AA", "BB-", "C", np.nan, "BBB+", "AA"],
name="second_best_rtg",
)
pd.testing.assert_series_equal(actual, expectations)
def test_get_second_best_rating_longterm_with_inferring_rating_provider(
rtg_inputs_longterm,
):
"""Test computation of best ratings on a security (line-by-line) basis."""
actual = rtg.get_second_best_ratings(rtg_inputs_longterm, tenor="long-term")
expectations = pd.Series(
data=["AA+", "AA-", "AA", "BB-", "C", np.nan, "BBB+", "AA"],
name="second_best_rtg",
)
pd.testing.assert_series_equal(actual, expectations)
def test_get_second_best_rating_shortterm_with_explicit_rating_provider(
rtg_inputs_shortterm,
):
"""Test computation of best ratings on a security (line-by-line) basis."""
actual = rtg.get_second_best_ratings(
rtg_inputs_shortterm,
rating_provider_input=["SP", "Moody", "Fitch"],
tenor="short-term",
)
expectations = pd.Series(
data=["A-1", "B", "A-1+", "B", "A-3", np.nan, "A-2", "A-3"],
name="second_best_rtg",
)
pd.testing.assert_series_equal(actual, expectations)
def test_get_second_best_rating_shortterm_with_inferring_rating_provider(
rtg_inputs_shortterm,
):
"""Test computation of best ratings on a security (line-by-line) basis."""
actual = rtg.get_second_best_ratings(rtg_inputs_shortterm, tenor="short-term")
expectations = pd.Series(
data=["A-1", "B", "A-1+", "B", "A-3", np.nan, "A-2", "A-3"],
name="second_best_rtg",
)
pd.testing.assert_series_equal(actual, expectations)
def test_get_worst_rating_longterm_with_explicit_rating_provider(
rtg_inputs_longterm,
):
"""Test computation of second-best ratings on a security (line-by-line) basis."""
actual = rtg.get_worst_ratings(
rtg_inputs_longterm,
rating_provider_input=["SP", "Moody", "Fitch"],
tenor="long-term",
)
expectations = pd.Series(
data=["AA-", "AA-", "AA-", "B+", "C", np.nan, "BBB+", "AA"], name="worst_rtg"
)
| pd.testing.assert_series_equal(actual, expectations) | pandas.testing.assert_series_equal |
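# Condensed usage sketch of the API exercised by the tests above (fixture shape shortened
# to two issuers; output values are whatever pyratings returns, not asserted here):
import numpy as np
import pandas as pd
import pyratings as rtg

ratings = pd.DataFrame({"rtg_sp": ["AAA", "AA-"],
                        "rtg_moody": ["Aa1", "Aa3"],
                        "rtg_fitch": ["AA-", np.nan]})
print(rtg.get_best_ratings(ratings, rating_provider_input=["SP", "Moody", "Fitch"], tenor="long-term"))
print(rtg.get_worst_ratings(ratings, rating_provider_input=["SP", "Moody", "Fitch"], tenor="long-term"))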
from __future__ import annotations
import re
import warnings
from enum import Enum, auto
from typing import Dict, List, Union, Tuple, Optional
import numpy as np
import pandas as pd
import torch
from ..exceptions import TsFileParseException
from ..utils import stack_pad
class TsTagValuePattern(Enum):
"""
Enumeration holding the known `.ts` file headers tag value types in the form of regex expressions.
"""
BOOLEAN = re.compile('(?:tru|fals)e')
ANY_CONNECTED_STRING = re.compile('\\w+')
INTEGER_NUMBER = re.compile('\\d+')
CLASS_LABEL = re.compile('(?:tru|fals)e(?:(?<=true)((?: [^\s]+)+)|(?<=false))')
# ((?:tru|fals)e)(?(?<=true)((?: \w+)+))(?=\s)
class TsTag(str, Enum):
"""
Enumeration holding the names of the known `.ts` file tag names.
"""
PROBLEM_NAME = 'problemName'
TIMESTAMPS = 'timeStamps'
MISSING = 'missing'
EQUAL_LENGTH = 'equalLength'
SERIES_LENGTH = 'seriesLength'
CLASS_LABEL = 'classLabel'
UNIVARIATE = 'univariate'
DIMENSIONS = 'dimensions'
class TSFileLoader:
"""
File loader that can load time series files in sktimes `.ts` file format.
Args:
filepath (str): The path to the `.ts` file.
"""
class State(Enum):
"""
TSFileLoader's internal parsing state.
"""
PREFACE = 0
HEADER = 1
BODY = 2
BODY_TIME_STAMPS = 21
# Dict mapping known `.ts` file header tags to their respective parsing expression
header_info: Dict[TsTag, TsTagValuePattern] = {
TsTag.PROBLEM_NAME: TsTagValuePattern.ANY_CONNECTED_STRING,
TsTag.TIMESTAMPS: TsTagValuePattern.BOOLEAN,
TsTag.MISSING: TsTagValuePattern.BOOLEAN,
TsTag.EQUAL_LENGTH: TsTagValuePattern.BOOLEAN,
TsTag.SERIES_LENGTH: TsTagValuePattern.INTEGER_NUMBER,
TsTag.CLASS_LABEL: TsTagValuePattern.CLASS_LABEL,
TsTag.UNIVARIATE: TsTagValuePattern.BOOLEAN,
TsTag.DIMENSIONS: TsTagValuePattern.INTEGER_NUMBER
}
required_meta_info: List[TsTag] = [TsTag.PROBLEM_NAME, TsTag.CLASS_LABEL, TsTag.EQUAL_LENGTH, TsTag.MISSING,
TsTag.TIMESTAMPS]
def as_tensor(self, return_targets: bool = False) -> Union[torch.Tensor, Tuple[torch.Tensor, List[str]]]:
"""Return the loaded data as a 3 dimensional tensor of the form (N, C, S).
Keyword Args:
return_targets (bool): If True, also return the list of class labels alongside the tensor.
Returns:
torch.Tensor: A 3 dimensional tensor.
"""
data_ = []
if len(self.data) == 0:
self.parse()
for dim in self.data:
data_.append(stack_pad(dim))
data_ = torch.permute(torch.stack(data_, dim=-1), (0, 2, 1))
if self.header[TsTag.CLASS_LABEL] and return_targets:
return data_, self.targets
return data_
def as_dataframe(self, return_targets: bool = False) -> pd.DataFrame:
"""Return the loaded data as a pandas dataframe.
Keyword Args:
return_targets (bool): Identifies whether the targets should be included in the returned dataframe. If
True, the targets will be added as an additional column 'targets' to the dataframe. This only has an effect
if there are class labels available in the datafile that was parsed in the first place.
Returns:
pd.DataFrame: A nested pandas dataframe holding the dimensions as columns and the number examples as rows,
where every cell contains a pandas Series containing a univariate time series.
If `return_targets` is set, it will also contain a column 'targets' that contains the class labels of every
example.
"""
data = pd.DataFrame(dtype=np.float32)
if len(self.data) == 0:
self.parse()
for dim in range(0, len(self.data)):
data["dim_" + str(dim)] = self.data[dim]
if self.header[TsTag.CLASS_LABEL] and return_targets:
data["targets"] = self.targets
return data
def get_classes(self):
"""Return the classes found in the '.ts' file
Returns:
List[str]: List of class names as string.
"""
if self.header[TsTag.CLASS_LABEL]:
return self.header[TsTag.CLASS_LABEL]
else:
raise AttributeError(f"The '.ts' file {self.filename} does not have any class labels")
def __init__(self, filepath: str, nan_replace_value: Union[int, float, str] = "NaN"):
self.filename = filepath
self.line_number = 1
self.file = open(filepath, "r", encoding="utf-8")
self.state = self.State.PREFACE
self.header = {k: None for k in self.header_info.keys()}
self.data = []
self.targets = []
self.dim = None
self.series_length = 0
self.timestamp_type = None
self.nan_replace_value = nan_replace_value
def parse_header(self, line: str) -> None:
"""Parses a line of a `.ts` file header and updates the internal state of the loader with the extracted
information.
Args:
line (str): The header line to parse.
Returns:
None
"""
if not line.startswith("@"):
raise TsFileParseException(
f"Line number {self.line_number} was interpreted as HEADER but does not start with '@'!"
)
line = line[1:]
if len(line) == 0:
raise TsFileParseException(
f"Line number {self.line_number} contains an empty tag!"
)
tokens = line.split(" ", maxsplit=1)
token_len = len(tokens)
if token_len == 1:
raise TsFileParseException(
f"tag '{tokens[0]}' at line number {self.line_number} requires an associated value!"
)
tag = TsTag(tokens[0])
value_pattern = self.header_info[tag]
value = value_pattern.value.match(tokens[1])
if value:
if len(value.groups()) > 1:
raise TsFileParseException(
"Value extractor should return exactly ONE match!"
)
if len(value.groups()) > 0:
value = value.group(1)
else:
value = value.group(0)
self.header[tag] = self.parse_header_value(value, value_pattern)
def parse_header_value(self, value: str, value_type: TsTagValuePattern) -> Union[bool, str, int, List[str]]:
"""Parse a single header value that was extracted by the header line parser and return its value as the
appropriate python object.
Args:
value (str): Extracted header value that should be parsed.
value_type (TsTagValuePattern): The expected type of the value, which should be applied.
Returns:
bool: If the value is of type BOOLEAN. `value` converted to bool
str: If the value is of type ANY_CONNECTED_STRING. Returns the stripped value string.
List[str]: If the value is of type CLASS_LABEL. Returns a list of space separated string class labels.
"""
if value_type == TsTagValuePattern.BOOLEAN:
return value == 'true'
if value_type == TsTagValuePattern.ANY_CONNECTED_STRING:
return value.strip()
if value_type == TsTagValuePattern.CLASS_LABEL:
if value is None:
return False
return value.strip().split(" ")
if value_type == TsTagValuePattern.INTEGER_NUMBER:
try:
value = int(value)
except ValueError:
raise TsFileParseException(
f"Value '{value}' in line {self.line_number} could not be interpreted as int"
)
return value
def parse_body(self, line: str) -> None:
"""Parse a line of the `@data` content of a `.ts` file if `@timeStamps` is `False`.
Args:
line (str): The `@data` line to parse.
Returns:
None
"""
dimensions = line.split(":")
if not self.data:
if not self.header[TsTag.DIMENSIONS]:
warnings.warn("Meta information for '@dimensions' is missing. Inferring from data.",
UserWarning,
stacklevel=2)
self.dim = len(dimensions)
# last dimension is the target
if self.header[TsTag.CLASS_LABEL]:
self.dim -= 1
self.data = [[] for _ in range(self.dim)]
# Check dimensionality of the data of the current line
# All dimensions should be included for all series, even if they are empty
line_dim = len(dimensions)
if self.header[TsTag.CLASS_LABEL]:
line_dim -= 1
if line_dim != self.dim:
raise TsFileParseException(
f"Inconsistent number of dimensions. Expecting {self.dim} but got {line_dim} "
f"in line number {self.line_number}."
)
# Process the data for each dimension
for dim in range(0, self.dim):
dimension = dimensions[dim].strip()
if dimension:
dimension = dimension.replace("?", str(self.nan_replace_value))  # nan_replace_value may be numeric
data_series = dimension.split(",")
data_series = [float(i) for i in data_series]
dim_len = len(data_series)
if self.series_length < dim_len:
if not self.header[TsTag.EQUAL_LENGTH]:
self.series_length = dim_len
else:
raise TsFileParseException(
f"Series length was given as {self.series_length} but dimension {dim} in line "
f"{self.line_number} is of length {dim_len}"
)
self.data[dim].append( | pd.Series(data_series) | pandas.Series |
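# Sketch using torch.nn.utils.rnn.pad_sequence as a stand-in for the project's stack_pad
# (whose implementation is not shown here) to show how variable-length univariate series
# become one padded (N, S) tensor per dimension before the final stack/permute in as_tensor:
import torch
from torch.nn.utils.rnn import pad_sequence

series = [torch.tensor([1.0, 2.0, 3.0]), torch.tensor([4.0, 5.0])]
padded = pad_sequence(series, batch_first=True)   # shape (2, 3); the short series is zero-padded
print(padded)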
import numpy as np
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import Categorical, DataFrame, Index, Series
import pandas._testing as tm
class TestDataFrameIndexingCategorical:
def test_assignment(self):
# assignment
df = DataFrame(
{"value": np.array(np.random.randint(0, 10000, 100), dtype="int32")}
)
labels = Categorical([f"{i} - {i + 499}" for i in range(0, 10000, 500)])
df = df.sort_values(by=["value"], ascending=True)
s = pd.cut(df.value, range(0, 10500, 500), right=False, labels=labels)
d = s.values
df["D"] = d
str(df)
result = df.dtypes
expected = Series(
[np.dtype("int32"), CategoricalDtype(categories=labels, ordered=False)],
index=["value", "D"],
)
tm.assert_series_equal(result, expected)
df["E"] = s
str(df)
result = df.dtypes
expected = Series(
[
np.dtype("int32"),
CategoricalDtype(categories=labels, ordered=False),
CategoricalDtype(categories=labels, ordered=False),
],
index=["value", "D", "E"],
)
tm.assert_series_equal(result, expected)
result1 = df["D"]
result2 = df["E"]
tm.assert_categorical_equal(result1._mgr._block.values, d)
# sorting
s.name = "E"
tm.assert_series_equal(result2.sort_index(), s.sort_index())
cat = Categorical([1, 2, 3, 10], categories=[1, 2, 3, 4, 10])
df = DataFrame(Series(cat))
def test_assigning_ops(self):
# systematically test the assigning operations:
# for all slicing ops:
# for value in categories and value not in categories:
# - assign a single value -> exp_single_cats_value
# - assign a complete row (mixed values) -> exp_single_row
# assign multiple rows (mixed values) (-> array) -> exp_multi_row
# assign a part of a column with dtype == categorical ->
# exp_parts_cats_col
# assign a part of a column with dtype != categorical ->
# exp_parts_cats_col
cats = Categorical(["a", "a", "a", "a", "a", "a", "a"], categories=["a", "b"])
idx = Index(["h", "i", "j", "k", "l", "m", "n"])
values = [1, 1, 1, 1, 1, 1, 1]
orig = DataFrame({"cats": cats, "values": values}, index=idx)
# the expected values
# changed single row
cats1 = Categorical(["a", "a", "b", "a", "a", "a", "a"], categories=["a", "b"])
idx1 = Index(["h", "i", "j", "k", "l", "m", "n"])
values1 = [1, 1, 2, 1, 1, 1, 1]
exp_single_row = DataFrame({"cats": cats1, "values": values1}, index=idx1)
# changed multiple rows
cats2 = Categorical(["a", "a", "b", "b", "a", "a", "a"], categories=["a", "b"])
idx2 = Index(["h", "i", "j", "k", "l", "m", "n"])
values2 = [1, 1, 2, 2, 1, 1, 1]
exp_multi_row = DataFrame({"cats": cats2, "values": values2}, index=idx2)
# changed part of the cats column
cats3 = Categorical(["a", "a", "b", "b", "a", "a", "a"], categories=["a", "b"])
idx3 = Index(["h", "i", "j", "k", "l", "m", "n"])
values3 = [1, 1, 1, 1, 1, 1, 1]
exp_parts_cats_col = DataFrame({"cats": cats3, "values": values3}, index=idx3)
# changed single value in cats col
cats4 = Categorical(["a", "a", "b", "a", "a", "a", "a"], categories=["a", "b"])
idx4 = Index(["h", "i", "j", "k", "l", "m", "n"])
values4 = [1, 1, 1, 1, 1, 1, 1]
exp_single_cats_value = DataFrame(
{"cats": cats4, "values": values4}, index=idx4
)
# iloc
# ###############
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.iloc[2, 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
df = orig.copy()
df.iloc[df.index == "j", 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
msg1 = (
"Cannot setitem on a Categorical with a new category, "
"set the categories first"
)
msg2 = "Cannot set a Categorical with another, without identical categories"
with pytest.raises(ValueError, match=msg1):
df = orig.copy()
df.iloc[2, 0] = "c"
# - assign a complete row (mixed values) -> exp_single_row
df = orig.copy()
df.iloc[2, :] = ["b", 2]
tm.assert_frame_equal(df, exp_single_row)
# - assign a complete row (mixed values) not in categories set
with pytest.raises(ValueError, match=msg1):
df = orig.copy()
df.iloc[2, :] = ["c", 2]
# - assign multiple rows (mixed values) -> exp_multi_row
df = orig.copy()
df.iloc[2:4, :] = [["b", 2], ["b", 2]]
tm.assert_frame_equal(df, exp_multi_row)
with pytest.raises(ValueError, match=msg1):
df = orig.copy()
df.iloc[2:4, :] = [["c", 2], ["c", 2]]
# assign a part of a column with dtype == categorical ->
# exp_parts_cats_col
df = orig.copy()
df.iloc[2:4, 0] = Categorical(["b", "b"], categories=["a", "b"])
tm.assert_frame_equal(df, exp_parts_cats_col)
with pytest.raises(ValueError, match=msg2):
# different categories -> not sure if this should fail or pass
df = orig.copy()
df.iloc[2:4, 0] = Categorical(list("bb"), categories=list("abc"))
with pytest.raises(ValueError, match=msg2):
# different values
df = orig.copy()
df.iloc[2:4, 0] = Categorical(list("cc"), categories=list("abc"))
# assign a part of a column with dtype != categorical ->
# exp_parts_cats_col
df = orig.copy()
df.iloc[2:4, 0] = ["b", "b"]
tm.assert_frame_equal(df, exp_parts_cats_col)
with pytest.raises(ValueError, match=msg1):
df.iloc[2:4, 0] = ["c", "c"]
# loc
# ##############
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.loc["j", "cats"] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
df = orig.copy()
df.loc[df.index == "j", "cats"] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
with pytest.raises(ValueError, match=msg1):
df = orig.copy()
df.loc["j", "cats"] = "c"
# - assign a complete row (mixed values) -> exp_single_row
df = orig.copy()
df.loc["j", :] = ["b", 2]
tm.assert_frame_equal(df, exp_single_row)
# - assign a complete row (mixed values) not in categories set
with pytest.raises(ValueError, match=msg1):
df = orig.copy()
df.loc["j", :] = ["c", 2]
# - assign multiple rows (mixed values) -> exp_multi_row
df = orig.copy()
df.loc["j":"k", :] = [["b", 2], ["b", 2]]
tm.assert_frame_equal(df, exp_multi_row)
with pytest.raises(ValueError, match=msg1):
df = orig.copy()
df.loc["j":"k", :] = [["c", 2], ["c", 2]]
# assign a part of a column with dtype == categorical ->
# exp_parts_cats_col
df = orig.copy()
df.loc["j":"k", "cats"] = Categorical(["b", "b"], categories=["a", "b"])
tm.assert_frame_equal(df, exp_parts_cats_col)
with pytest.raises(ValueError, match=msg2):
# different categories -> not sure if this should fail or pass
df = orig.copy()
df.loc["j":"k", "cats"] = Categorical(
["b", "b"], categories=["a", "b", "c"]
)
with pytest.raises(ValueError, match=msg2):
# different values
df = orig.copy()
df.loc["j":"k", "cats"] = Categorical(
["c", "c"], categories=["a", "b", "c"]
)
# assign a part of a column with dtype != categorical ->
# exp_parts_cats_col
df = orig.copy()
df.loc["j":"k", "cats"] = ["b", "b"]
tm.assert_frame_equal(df, exp_parts_cats_col)
with pytest.raises(ValueError, match=msg1):
df.loc["j":"k", "cats"] = ["c", "c"]
# loc
# ##############
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.loc["j", df.columns[0]] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
df = orig.copy()
df.loc[df.index == "j", df.columns[0]] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
with pytest.raises(ValueError, match=msg1):
df = orig.copy()
df.loc["j", df.columns[0]] = "c"
# - assign a complete row (mixed values) -> exp_single_row
df = orig.copy()
df.loc["j", :] = ["b", 2]
tm.assert_frame_equal(df, exp_single_row)
# - assign a complete row (mixed values) not in categories set
with pytest.raises(ValueError, match=msg1):
df = orig.copy()
df.loc["j", :] = ["c", 2]
# - assign multiple rows (mixed values) -> exp_multi_row
df = orig.copy()
df.loc["j":"k", :] = [["b", 2], ["b", 2]]
tm.assert_frame_equal(df, exp_multi_row)
with pytest.raises(ValueError, match=msg1):
df = orig.copy()
df.loc["j":"k", :] = [["c", 2], ["c", 2]]
# assign a part of a column with dtype == categorical ->
# exp_parts_cats_col
df = orig.copy()
df.loc["j":"k", df.columns[0]] = Categorical(["b", "b"], categories=["a", "b"])
tm.assert_frame_equal(df, exp_parts_cats_col)
with pytest.raises(ValueError, match=msg2):
# different categories -> not sure if this should fail or pass
df = orig.copy()
df.loc["j":"k", df.columns[0]] = Categorical(
["b", "b"], categories=["a", "b", "c"]
)
with pytest.raises(ValueError, match=msg2):
# different values
df = orig.copy()
df.loc["j":"k", df.columns[0]] = Categorical(
["c", "c"], categories=["a", "b", "c"]
)
# assign a part of a column with dtype != categorical ->
# exp_parts_cats_col
df = orig.copy()
df.loc["j":"k", df.columns[0]] = ["b", "b"]
tm.assert_frame_equal(df, exp_parts_cats_col)
with pytest.raises(ValueError, match=msg1):
df.loc["j":"k", df.columns[0]] = ["c", "c"]
# iat
df = orig.copy()
df.iat[2, 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
with pytest.raises(ValueError, match=msg1):
df = orig.copy()
df.iat[2, 0] = "c"
# at
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.at["j", "cats"] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
with pytest.raises(ValueError, match=msg1):
df = orig.copy()
df.at["j", "cats"] = "c"
# fancy indexing
catsf = Categorical(
["a", "a", "c", "c", "a", "a", "a"], categories=["a", "b", "c"]
)
idxf = Index(["h", "i", "j", "k", "l", "m", "n"])
valuesf = [1, 1, 3, 3, 1, 1, 1]
df = DataFrame({"cats": catsf, "values": valuesf}, index=idxf)
exp_fancy = exp_multi_row.copy()
exp_fancy["cats"].cat.set_categories(["a", "b", "c"], inplace=True)
df[df["cats"] == "c"] = ["b", 2]
# category c is kept in .categories
tm.assert_frame_equal(df, exp_fancy)
# set_value
df = orig.copy()
df.at["j", "cats"] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
with pytest.raises(ValueError, match=msg1):
df = orig.copy()
df.at["j", "cats"] = "c"
        # Assigning a Category to parts of an int/... column uses the values of
# the Categorical
df = DataFrame({"a": [1, 1, 1, 1, 1], "b": list("aaaaa")})
exp = DataFrame({"a": [1, "b", "b", 1, 1], "b": list("aabba")})
df.loc[1:2, "a"] = Categorical(["b", "b"], categories=["a", "b"])
df.loc[2:3, "b"] = Categorical(["b", "b"], categories=["a", "b"])
tm.assert_frame_equal(df, exp)
def test_functions_no_warnings(self):
df = DataFrame({"value": np.random.randint(0, 100, 20)})
labels = [f"{i} - {i + 9}" for i in range(0, 100, 10)]
with tm.assert_produces_warning(False):
df["group"] = pd.cut(
df.value, range(0, 105, 10), right=False, labels=labels
)
def test_setitem_single_row_categorical(self):
# GH 25495
df = DataFrame({"Alpha": ["a"], "Numeric": [0]})
categories = pd.Categorical(df["Alpha"], categories=["a", "b", "c"])
df.loc[:, "Alpha"] = categories
result = df["Alpha"]
expected = Series(categories, index=df.index, name="Alpha")
tm.assert_series_equal(result, expected)
def test_loc_indexing_preserves_index_category_dtype(self):
# GH 15166
df = DataFrame(
data=np.arange(2, 22, 2),
index=pd.MultiIndex(
levels=[pd.CategoricalIndex(["a", "b"]), range(10)],
codes=[[0] * 5 + [1] * 5, range(10)],
names=["Index1", "Index2"],
),
)
expected = pd.CategoricalIndex(
["a", "b"],
categories=["a", "b"],
ordered=False,
name="Index1",
dtype="category",
)
result = df.index.levels[0]
tm.assert_index_equal(result, expected)
result = df.loc[["a"]].index.levels[0]
tm.assert_index_equal(result, expected)
def test_wrong_length_cat_dtype_raises(self):
# GH29523
        cat = pd.Categorical.from_codes([0, 1, 1, 0, 1, 2], ["a", "b", "c"])
'''
/*******************************************************************************
* Copyright 2016-2019 Exactpro (Exactpro Systems Limited)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
'''
import numpy
import matplotlib.pyplot as plt
import scipy.stats as stats
import pandas
import datetime
import calendar
class RelativeFrequencyChart:
# returns coordinates for each chart column
def get_coordinates(self, data, bins): # bins - chart columns count
self.btt = numpy.array(list(data))
self.y, self.x, self.bars = plt.hist(self.btt, weights=numpy.zeros_like(self.btt) + 1. / self.btt.size, bins=bins)
return self.x, self.y
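# Editor's illustrative sketch (not part of the original module): the sample
# data and bin count below are assumptions used only to show the call shape.
def _example_relative_frequency_usage():
    """Minimal usage sketch for RelativeFrequencyChart.get_coordinates()."""
    sample_durations = [1, 2, 2, 3, 5, 8, 8, 8, 13]  # hypothetical data
    chart = RelativeFrequencyChart()
    x_coords, y_coords = chart.get_coordinates(sample_durations, bins=5)
    # y_coords holds the relative frequency (share of observations) per bin.
    return x_coords, y_coords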
class FrequencyDensityChart:
def get_coordinates_histogram(self, data, bins):
self.btt = numpy.array(list(data))
self.y, self.x, self.bars = plt.hist(self.btt, bins=bins, density=True)
return self.x, self.y
def get_coordinates_line(self, data):
try:
self.btt = numpy.array(list(data))
self.density = stats.kde.gaussian_kde(list(data))
self.x_den = numpy.linspace(0, data.max(), data.count())
self.density = self.density(self.x_den)
return self.x_den, self.density
except numpy.linalg.linalg.LinAlgError:
return [-1], [-1]
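# Editor's illustrative sketch (not part of the original module): a pandas
# Series input is assumed because get_coordinates_line() calls data.max()
# and data.count().
def _example_density_line_usage():
    """Minimal usage sketch for FrequencyDensityChart.get_coordinates_line()."""
    sample_data = pandas.Series([0.5, 1.0, 1.5, 2.0, 2.5, 4.0])  # hypothetical
    chart = FrequencyDensityChart()
    x_den, density = chart.get_coordinates_line(sample_data)
    # A return value of ([-1], [-1]) signals that the KDE could not be built.
    return x_den, density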
class DynamicChart:
def get_coordinates(self, frame, step_size):
self.plot = {} # chart coordinates
self.dynamic_bugs = []
self.x = []
self.y = []
self.plot['period'] = step_size
if step_size == 'W-SUN':
self.periods = DynamicChart.get_periods(self, frame, step_size) # separates DataFrame to the specified periods
if len(self.periods) == 0:
return 'error'
self.cumulative = 0 # cumulative total of defect submission for specific period
for self.period in self.periods:
# checks whether the first day of period is Monday (if not then we change first day to Monday)
if pandas.to_datetime(self.period[0]) < pandas.to_datetime(frame['Created_tr']).min():
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >=
pandas.to_datetime(frame['Created_tr']).min()) &
(pandas.to_datetime(frame['Created_tr']) <= pandas.to_datetime(self.period[1]))]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str(datetime.datetime.date(pandas.to_datetime(frame['Created_tr'], format='%Y-%m-%d').min())))
self.y.append(self.cumulative)
else:
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >= pandas.to_datetime(self.period[0]))
& (pandas.to_datetime(frame['Created_tr']) <= pandas.to_datetime(self.period[1]))]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str((self.period[0])))
self.y.append(self.cumulative)
# check whether the date from new DataFrame is greater than date which is specified in settings
if pandas.to_datetime(frame['Created_tr']).max() > pandas.to_datetime(self.periods[-1][1]):
# processing of days which are out of full period set
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) > pandas.to_datetime(self.periods[-1][1]))
& (pandas.to_datetime(frame['Created_tr']) <=
pandas.to_datetime(frame['Created_tr']).max())]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str(datetime.datetime.date(pandas.to_datetime(self.periods[-1][1], format='%Y-%m-%d')) + datetime.timedelta(days=1)))
self.y.append(self.cumulative)
self.dynamic_bugs.append(self.x)
self.dynamic_bugs.append(self.y)
self.plot['dynamic bugs'] = self.dynamic_bugs
self.cumulative = 0
return self.plot
if step_size in ['7D', '10D', '3M', '6M', 'A-DEC']:
self.count0 = 0
self.count1 = 1
self.periods = DynamicChart.get_periods(self, frame, step_size) # DataFrame separation by the specified periods
if len(self.periods) == 0:
return 'error'
self.cumulative = 0
self.countPeriodsList = len(self.periods) # count of calculated periods
self.count = 1
if self.countPeriodsList == 1:
if step_size == '7D':
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >=
pandas.to_datetime(frame['Created_tr']).min())
& (pandas.to_datetime(frame['Created_tr'])
< pandas.to_datetime(datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min())
+datetime.timedelta(days=7)))]
                    self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str(DynamicChart.get_date_for_dynamic_my(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr'], format='%Y-%m-%d').min()), step_size)))
self.y.append(self.cumulative)
if pandas.to_datetime(frame['Created_tr']).max() > pandas.to_datetime(datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min())+datetime.timedelta(days=7)):
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >=
pandas.to_datetime(datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min())+
datetime.timedelta(days=7))) & (pandas.to_datetime(frame['Created_tr'])
<= pandas.to_datetime(frame['Created_tr']).max())]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str(DynamicChart.get_date_for_dynamic_my(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min())+datetime.timedelta(days=7), step_size)))
self.y.append(self.cumulative)
self.cumulative = 0
if step_size == '10D':
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >= pandas.to_datetime(frame['Created_tr']).min()) & (pandas.to_datetime(frame['Created_tr']) < pandas.to_datetime(datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min())+datetime.timedelta(days=10)))]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str(DynamicChart.get_date_for_dynamic_my(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr'], format='%Y-%m-%d').min()), step_size)))
self.y.append(self.cumulative)
if pandas.to_datetime(frame['Created_tr']).max() > pandas.to_datetime(datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min())+datetime.timedelta(days=10)):
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >= pandas.to_datetime(datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min())+datetime.timedelta(days=10))) & (pandas.to_datetime(frame['Created_tr']) <= pandas.to_datetime(frame['Created_tr']).max())]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str(DynamicChart.get_date_for_dynamic_my(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min())+datetime.timedelta(days=10), step_size)))
self.y.append(self.cumulative)
self.cumulative = 0
if step_size == '3M':
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >= pandas.to_datetime(frame['Created_tr']).min())
& (pandas.to_datetime(frame['Created_tr']) <
pandas.to_datetime(DynamicChart.add_months(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min()), 3)))]
                    self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str(DynamicChart.get_date_for_dynamic_my(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr'], format='%Y-%m-%d').min()), step_size)))
self.y.append(self.cumulative)
if pandas.to_datetime(frame['Created_tr']).max() > pandas.to_datetime(DynamicChart.add_months(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min()), 3)):
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >= pandas.to_datetime(DynamicChart.add_months(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min()), 3))) & (pandas.to_datetime(frame['Created_tr']) <= pandas.to_datetime(frame['Created_tr']).max())]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str(DynamicChart.get_date_for_dynamic_my(self, DynamicChart.add_months(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min()), 3), step_size)))
self.y.append(self.cumulative)
self.cumulative = 0
if step_size == '6M':
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >= pandas.to_datetime(frame['Created_tr']).min())
& (pandas.to_datetime(frame['Created_tr']) <
pandas.to_datetime(DynamicChart.add_months(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min()), 6)))]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str(DynamicChart.get_date_for_dynamic_my(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr'], format='%Y-%m-%d').min()), step_size)))
self.y.append(self.cumulative)
if pandas.to_datetime(frame['Created_tr']).max() > pandas.to_datetime(DynamicChart.add_months(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min()), 6)):
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >= pandas.to_datetime(DynamicChart.add_months(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min()), 6))) & (pandas.to_datetime(frame['Created_tr']) <= pandas.to_datetime(frame['Created_tr']).max())]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str(DynamicChart.get_date_for_dynamic_my(self, DynamicChart.add_months(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min()), 6), step_size)))
self.y.append(self.cumulative)
self.cumulative = 0
if step_size == 'A-DEC':
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >= pandas.to_datetime(frame['Created_tr']).min())
                                    & (pandas.to_datetime(frame['Created_tr'])
import numpy as np
import pandas as pd
import pytest
from lookback import models
class TestChangeDatesGeneralCase:
@pytest.fixture
def shape_data(self):
shape_df = pd.DataFrame(
data={
'shape_key': ['uts_co_S1', 'uts_co_S2', 'uts_co_S3', 'uts_co_S4'],
'START_DATE': ['2020-02-05', '2020-02-20', '2020-02-25', '2020-03-01'],
'END_DATE': ['2020-02-19', '2020-02-24', '2020-02-28', '2021-01-01'],
},
)
shape_df['END_DATE'] = pd.to_datetime(shape_df['END_DATE'])
shape_df['START_DATE'] = pd.to_datetime(shape_df['START_DATE'])
shape_df.set_index('START_DATE', drop=False, inplace=True)
return shape_df
@pytest.fixture
def district_data(self):
district_df = pd.DataFrame(
data={
'NewDistrict': ['1', '2', '3', '4'],
'district_key': ['co_D1', 'co_D2', 'co_D3', 'co_D4'],
'StartDate': ['2020-02-10', '2020-02-15', '2020-02-25', '2020-03-01'],
'EndDate': ['2020-02-14', '2020-02-24', '2020-02-28', None],
},
)
district_df['StartDate'] = pd.to_datetime(district_df['StartDate'])
district_df['EndDate'] = pd.to_datetime(district_df['EndDate'])
district_df.set_index('StartDate', drop=False, inplace=True)
return district_df
def test_change_dates_shape_before_district(self, mocker, shape_data, district_data):
county = mocker.Mock()
county.name = 'co'
county.shape_df = shape_data
county.district_df = district_data
models.County.calc_change_dates(county)
test_columns = ['change_date', 'county_name', 'county_version', 'district_number', 'district_version']
assert county.change_dates_df.loc[0, test_columns].values.tolist() == [
np.datetime64('2020-02-05'), 'co', 'uts_co_S1', 'n/a', 'n/a'
]
def test_change_dates_new_district_existing_shape(self, mocker, shape_data, district_data):
county = mocker.Mock()
county.name = 'co'
county.shape_df = shape_data
county.district_df = district_data
models.County.calc_change_dates(county)
test_columns = ['change_date', 'county_name', 'county_version', 'district_number', 'district_version']
assert county.change_dates_df.loc[1, test_columns].values.tolist() == [
np.datetime64('2020-02-10'), 'co', 'uts_co_S1', '1', 'co_D1'
]
def test_change_dates_change_district_same_shape(self, mocker, shape_data, district_data):
county = mocker.Mock()
county.name = 'co'
county.shape_df = shape_data
county.district_df = district_data
models.County.calc_change_dates(county)
test_columns = ['change_date', 'county_name', 'county_version', 'district_number', 'district_version']
assert county.change_dates_df.loc[2, test_columns].values.tolist() == [
np.datetime64('2020-02-15'), 'co', 'uts_co_S1', '2', 'co_D2'
]
def test_change_dates_same_district_change_shape(self, mocker, shape_data, district_data):
county = mocker.Mock()
county.name = 'co'
county.shape_df = shape_data
county.district_df = district_data
models.County.calc_change_dates(county)
test_columns = ['change_date', 'county_name', 'county_version', 'district_number', 'district_version']
assert county.change_dates_df.loc[3, test_columns].values.tolist() == [
np.datetime64('2020-02-20'), 'co', 'uts_co_S2', '2', 'co_D2'
]
def test_change_dates_change_district_change_shape(self, mocker, shape_data, district_data):
county = mocker.Mock()
county.name = 'co'
county.shape_df = shape_data
county.district_df = district_data
models.County.calc_change_dates(county)
test_columns = ['change_date', 'county_name', 'county_version', 'district_number', 'district_version']
assert county.change_dates_df.loc[4, test_columns].values.tolist() == [
np.datetime64('2020-02-25'), 'co', 'uts_co_S3', '3', 'co_D3'
]
def test_change_dates_change_district_change_shape_no_district_end_date(self, mocker, shape_data, district_data):
county = mocker.Mock()
county.name = 'co'
county.shape_df = shape_data
county.district_df = district_data
models.County.calc_change_dates(county)
test_columns = ['change_date', 'county_name', 'county_version', 'district_number', 'district_version']
assert county.change_dates_df.loc[5, test_columns].values.tolist() == [
np.datetime64('2020-03-01'), 'co', 'uts_co_S4', '4', 'co_D4'
]
class TestChangeDatesRichlandCase:
@pytest.fixture
def richland_shape_data(self):
shape_df = pd.DataFrame(
data={
'shape_key': ['uts_richland_S1', 'uts_rich_S1', 'uts_rich_S2'],
'START_DATE': ['2020-02-01', '2020-02-10', '2020-02-15'],
'END_DATE': ['2020-02-09', '2020-02-14', '2020-02-24'],
},
)
shape_df['END_DATE'] = pd.to_datetime(shape_df['END_DATE'])
shape_df['START_DATE'] = pd.to_datetime(shape_df['START_DATE'])
shape_df.set_index('START_DATE', drop=False, inplace=True)
return shape_df
@pytest.fixture
def richland_district_data(self):
district_df = pd.DataFrame(
data={
'NewDistrict': ['1', '1', '2'],
'district_key': ['richland_D1', 'rich_D1', 'rich_D2'],
                'StartDate': ['2020-02-05', '2020-02-10', '2020-02-20'],
'EndDate': [None, '2020-02-19', None],
},
)
district_df['StartDate'] = pd.to_datetime(district_df['StartDate'])
district_df['EndDate'] = pd.to_datetime(district_df['EndDate'])
district_df.set_index('StartDate', drop=False, inplace=True)
return district_df
def test_change_dates_richland_shape_no_district(self, mocker, richland_shape_data, richland_district_data):
county = mocker.Mock()
county.name = 'rich'
county.shape_df = richland_shape_data
county.district_df = richland_district_data
models.County.calc_change_dates(county)
test_columns = ['change_date', 'county_name', 'county_version', 'district_number', 'district_version']
assert county.change_dates_df.loc[0, test_columns].values.tolist() == [
np.datetime64('2020-02-01'), 'rich', 'uts_richland_S1', 'n/a', 'n/a'
]
def test_change_dates_richland_shape_richland_district(self, mocker, richland_shape_data, richland_district_data):
county = mocker.Mock()
county.name = 'rich'
county.shape_df = richland_shape_data
county.district_df = richland_district_data
models.County.calc_change_dates(county)
test_columns = ['change_date', 'county_name', 'county_version', 'district_number', 'district_version']
assert county.change_dates_df.loc[1, test_columns].values.tolist() == [
np.datetime64('2020-02-05'), 'rich', 'uts_richland_S1', '1', 'richland_D1'
]
def test_change_dates_rich_shape_rich_district(self, mocker, richland_shape_data, richland_district_data):
county = mocker.Mock()
county.name = 'rich'
county.shape_df = richland_shape_data
county.district_df = richland_district_data
models.County.calc_change_dates(county)
test_columns = ['change_date', 'county_name', 'county_version', 'district_number', 'district_version']
assert county.change_dates_df.loc[2, test_columns].values.tolist() == [
np.datetime64('2020-02-10'), 'rich', 'uts_rich_S1', '1', 'rich_D1'
]
def test_change_dates_rich_shape_rich_district_no_district_end_date(
self, mocker, richland_shape_data, richland_district_data
):
county = mocker.Mock()
county.name = 'rich'
county.shape_df = richland_shape_data
county.district_df = richland_district_data
models.County.calc_change_dates(county)
test_columns = ['change_date', 'county_name', 'county_version', 'district_number', 'district_version']
assert county.change_dates_df.loc[4, test_columns].values.tolist() == [
np.datetime64('2020-02-20'), 'rich', 'uts_rich_S2', '2', 'rich_D2'
]
class TestAddingExtraFields:
def test_unique_district_key_creation(self, mocker):
county_mock = mocker.Mock()
county_mock.joined_df = pd.DataFrame(
data={
'change_date': ['2020-01-01', '2030-01-01'],
'district_number': ['1', '1S'],
'EndDate': ['2029-12-31', '2039-12-31'],
'county_version': ['uts_foo_S1', 'uts_foo_S1'],
'END_DATE': ['2029-12-31', '2039-12-31'],
}
)
        county_mock.joined_df['change_date'] = pd.to_datetime(county_mock.joined_df['change_date'])
import pandas as pd
import itertools
import pandas_datareader.data as web
from pandas.tseries.offsets import BDay
from datetime import datetime
import random
import math
import collections
# Stocks data
tickers = ['FDX', 'GOOGL', 'XOM', 'KO', 'NOK', 'MS', 'IBM']
stocks_df = web.DataReader(tickers, 'yahoo', start=datetime(2010, 1, 1)).High
# Descriptors guarantee the type and behavior of variables
class Date:
"""General descriptor for date"""
def __init__(self, storage_name):
self.storage_name = storage_name
def __set__(self, instance, value):
if isinstance(value, datetime):
instance.__dict__[self.storage_name] = value
else:
raise ValueError('value must be a datetime')
class OneOfStock:
"""General descriptor for stocks"""
def __init__(self, storage_name):
self.storage_name = storage_name
def __set__(self, instance, value):
if value in set(tickers):
instance.__dict__[self.storage_name] = value
else:
            raise ValueError("value must be one of 'FDX', 'GOOGL', 'XOM', 'KO', 'NOK', 'MS', 'IBM'")
class OneOfMode:
"""General descriptor for investor mode"""
def __init__(self, storage_name):
self.storage_name = storage_name
def __set__(self, instance, value):
if value in {defensive, aggressive, mixed}:
instance.__dict__[self.storage_name] = value
else:
            raise ValueError("value must be one of defensive, aggressive, mixed")
class Monetary:
"""General descriptor for monetary entries"""
def __init__(self, storage_name):
self.storage_name = storage_name
def __set__(self, instance, value):
if (value is None) or (value >= 0):
instance.__dict__[self.storage_name] = value
else:
raise ValueError('value must be >= 0')
class Investment:
"""Investment is an abstract object. Bonds and Stocks inherit attributes and methods"""
pv = Monetary('pv')
start_date = Date('start_date')
end_date = Date('end_date')
def __init__(self, pv, start_date, end_date):
self.pv = pv
self.start_date = start_date
self.end_date = end_date
self.term = self.end_date - self.start_date
self.cash_flow = None
def return_on_investment(self):
return round((self.cash_flow.iloc[-1, 0] / self.pv) - 1, 4)
def total_return(self):
return round(self.cash_flow.iloc[-1, 0] - self.pv, 2)
def risk_on_investment(self):
return round(self.cash_flow.pct_change().std(), 4)
class Bonds(Investment):
"""All Bonds"""
def __init__(self, pv, rate: float, start_date, end_date):
super(Bonds, self).__init__(pv, start_date, end_date)
self.rate = rate
self.rate_flow = pd.Series(itertools.repeat((1 + self.rate) ** (1 / 365) - 1, self.term.days))
self.rate_flow.iloc[0] = 0
cash_flow = pd.DataFrame({'Date': pd.date_range(self.start_date, end=self.end_date, freq="D", closed='left'),
'Value': (1 + self.rate_flow).cumprod() * self.pv})
self.cash_flow = cash_flow.set_index('Date')
@classmethod # Call a bond as a short one. Ensure rate, min price and min period
def short(cls, start_date, pv=250):
if pv < 250:
raise ValueError('pv must be >= 250')
rate, end_date = 0.015, start_date + pd.DateOffset(years=2)
bond = cls(pv, rate, start_date, end_date)
return bond
@classmethod # Call a bond as a long one. Ensure rate, min price and min period
def long(cls, start_date, pv=1000):
if pv < 1000:
raise ValueError('pv must be >= 1000')
rate, end_date = 0.03, start_date + pd.DateOffset(years=5)
bond = cls(pv, rate, start_date, end_date)
return bond
def compound_rate(self, end_date):
"""Returns compound rate for a given date"""
total_days = (end_date - self.start_date).days
return round((1 + self.rate_flow.loc[self.rate_flow.index <= total_days]).prod() - 1, 4)
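# Editor's illustrative sketch (not part of the original module): the purchase
# date and the long-bond price below are assumptions.
def _example_bond_usage():
    """Minimal usage sketch for the Bonds convenience constructors."""
    start = datetime(2015, 1, 2)  # hypothetical purchase date
    short_bond = Bonds.short(start_date=start)          # 2-year bond at 1.5%
    long_bond = Bonds.long(start_date=start, pv=2000)   # 5-year bond at 3.0%
    # total_return() is the final cash-flow value minus the purchase price.
    return short_bond.total_return(), long_bond.total_return()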
class Stocks(Investment):
"""All stocks"""
name = OneOfStock('name')
def __init__(self, name, start_date, end_date, num_stocks: int = 1):
self.name = name
self.start_date = start_date
self.end_date = end_date
self.num_stocks = num_stocks
self.price = stocks_df.loc[(self.start_date - BDay(1)):self.end_date, self.name]
self.pv = self.price.iloc[0] * self.num_stocks
cash_flow = pd.DataFrame({}, index=pd.date_range(start=(self.start_date - BDay(1)),
end=self.end_date, freq="D", closed='left'))
cash_flow = cash_flow.merge(pd.DataFrame({'Value': self.price * self.num_stocks}), how='outer',
right_on='Date', left_index=True)
self.cash_flow = cash_flow.set_index('Date').fillna(method='pad').loc[self.start_date:, ]
def return_on_stock(self, end_date):
"""Returns return on stock in a given date"""
return self.price[self.price.index <= end_date].iloc[-1] / self.price.iloc[0] - 1
def get_price(self, end_date):
"""Returns the price in a given date"""
return self.price[self.price.index <= end_date].iloc[-1]
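# Editor's illustrative sketch (not part of the original module): the ticker,
# dates and position size below are assumptions.
def _example_stock_usage():
    """Minimal usage sketch for the Stocks class."""
    position = Stocks(name='IBM', start_date=datetime(2015, 1, 5),
                      end_date=datetime(2016, 1, 5), num_stocks=10)
    # return_on_stock() reports the relative price change up to the given date.
    return position.return_on_stock(datetime(2015, 6, 1)), position.get_price(datetime(2015, 6, 1))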
# Investor is a named tuple with mode and budget as attributes
Investor = collections.namedtuple('Investor', ['mode', 'budget'])
class Portfolio:
"""All portfolios. Links Investor with investments"""
mode = OneOfMode('mode')
budget = Monetary('budget')
def __init__(self, investor: Investor, start_date, end_date, investment_weights: tuple = (75, 25)):
self.investor = investor
self.budget = self.investor[1]
self.start_date = start_date
self.end_date = end_date
self.investment_weights = investment_weights
self.term = self.end_date - self.start_date
self.mode = self.investor[0]
self.investments = self.mode(self)
self.invest_list = [(k, i) for k, i in self.investments.items()]
def portfolio_cash_flow(self):
investment_values = [self.investments[investment[0]].cash_flow for investment in self.invest_list]
investment_keys = [investment[0] for investment in self.invest_list]
port_cf = pd.concat(investment_values, keys=investment_keys,
names=['Investment', 'Date']).reset_index(level='Investment').fillna(method='pad')
return port_cf[port_cf.index < self.end_date]
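# Editor's illustrative sketch (not part of the original module): the budget,
# dates and weights below are assumptions chosen only to show how Investor and
# Portfolio fit together; `mixed` is one of the mode functions defined below.
def _example_portfolio_usage():
    """Minimal usage sketch for building a Portfolio from an Investor."""
    investor = Investor(mode=mixed, budget=5000)
    portfolio = Portfolio(investor, start_date=datetime(2015, 1, 5),
                          end_date=datetime(2016, 1, 5), investment_weights=(60, 40))
    # portfolio_cash_flow() concatenates the daily value of every holding.
    return portfolio.portfolio_cash_flow().groupby('Date').sum()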
# Mode functions. Defensive and Aggressive just fill the dict with bonds and stocks respectively,
# according to the type and values returned by accounting_investment().
def defensive(portfolio):
"""Builds a defensive portfolio"""
new_list = []
bonds = accounting_investment(portfolio)
investments = {key: getattr(Bonds, key)(start_date=portfolio.start_date, pv=bonds[key]) for key in bonds}
investments_list = [(k, i) for k, i in investments.items()]
end_dates = [(investments[investment[0]].end_date,
investments[investment[0]].cash_flow.iloc[-1, 0]) for investment in investments_list]
for end_date in end_dates:
if end_date[0] < portfolio.end_date:
# Deal with overdue bonds. Starts a recursive function to build a new portfolio with same mode
# and calculates the new budget as bond's final value + rest of old budget.
investor = Investor(portfolio.investor[0], end_date[1] + portfolio.budget)
new_port = Portfolio(investor, start_date=end_date[0],
end_date=portfolio.end_date)
new_list.extend([(k, i) for k, i in new_port.investments.items()])
portfolio.budget += new_port.budget
investments_2 = {k + '_' + str(math.trunc(random.random() * 100)): i for k, i in new_list}
return {**investments, **investments_2}
def aggressive(portfolio):
"""Builds an aggressive portfolio"""
stocks = accounting_investment(portfolio)
return {key: Stocks(name=key, start_date=portfolio.start_date, end_date=portfolio.end_date,
num_stocks=stocks[key]) for key in stocks}
def mixed(portfolio):
"""builds a mixed portfolio"""
investments = {}
temp = []
    # While the budget is enough to buy a short bond, make a weighted random choice between a stock and a bond.
while portfolio.budget >= Bonds.short(start_date=portfolio.start_date).pv:
mode_function = random.choices([pick_stock(portfolio), pick_bond(portfolio)],
weights=portfolio.investment_weights)
try: # if no bond or stock is bought just move on to the next
portfolio.budget += - mode_function[0][1] # subtract the acquisition price from the budget
key = list(mode_function[0][0].keys())[0]
if key in investments:
investments[key] += mode_function[0][0][key]
else:
investments[key] = mode_function[0][0][key]
        except TypeError:  # skipped when pick_stock doesn't return a dict
pass
for key in investments:
if key in {'short', 'long'}:
investments[key] = getattr(Bonds, key)(pv=investments[key], start_date=portfolio.start_date)
if investments[key].end_date <= portfolio.end_date:
budget = investments[key].cash_flow.iloc[-1, 0] + portfolio.budget
investor = Investor(portfolio.investor[0], budget)
new_port = Portfolio(investor, start_date=investments[key].end_date,
end_date=portfolio.end_date, investment_weights=portfolio.investment_weights)
temp.extend([(k, i) for k, i in new_port.investments.items()])
portfolio.budget += new_port.budget
elif key in tickers:
investments[key] = Stocks(key, start_date=portfolio.start_date, end_date=portfolio.end_date,
num_stocks=investments[key])
else:
pass
investments2 = {k + '_' + str(math.trunc(random.random() * 100)): i for k, i in temp}
return {**investments, **investments2}
# Functions for mode
def pick_bond(portfolio):
"""Randomly picks a bond"""
    # If the budget is enough to buy a long bond, randomly choose between a long and a short bond.
if portfolio.budget >= Bonds.long(start_date=portfolio.start_date).pv:
bond_type = random.choice(['long', 'short'])
value = getattr(Bonds, bond_type)(start_date=portfolio.start_date).pv
return {bond_type: value}, value
    # Otherwise, if the budget is enough for a short bond, put the whole remaining budget into it.
else:
bond_type = 'short'
value = portfolio.budget
return {bond_type: value}, value
def pick_stock(portfolio):
"""Randomly picks a stock"""
stock = random.choice(tickers)
try:
price = Stocks(name=stock, start_date=portfolio.start_date, end_date=portfolio.end_date).pv
max_num_stocks = int(portfolio.budget / price)
    # Raised when the date range doesn't allow picking a stock, e.g. when bonds mature in the last few days of a portfolio.
except KeyError:
max_num_stocks = 0
if max_num_stocks < 1:
pass
else:
amount = random.randint(1, max_num_stocks)
return {stock: amount}, amount * price
def accounting_investment(portfolio):
"""Accounts bonds or stocks"""
investments = {}
if portfolio.mode is defensive:
min_budget = Bonds.short(start_date=portfolio.start_date).pv
function = pick_bond
else:
min_budget = 100
function = pick_stock
while portfolio.budget >= min_budget:
invest = function(portfolio)
try: # if no bond or stock is bought just move on to the next
key = list(invest[0].keys())[0]
portfolio.budget += - invest[1]
if key in investments:
investments[key] += invest[0][key]
else:
investments[key] = invest[0][key]
        except TypeError:  # skipped when pick_stock doesn't return a dict
pass
return investments
# Working on simulations
def return_and_vol_on_portfolios(portfolio_lists: list, lists_names: list):
"""Computes mean return and the mean volatility for each portfolios group"""
i = 0
return_on_group = {}
for group in portfolio_lists:
return_on_portfolio = [(p.portfolio_cash_flow().groupby('Date').sum().Value[-1] /
p.portfolio_cash_flow().groupby('Date').sum().Value[0]) - 1 for p in group]
vol_on_portfolio = [p.portfolio_cash_flow().groupby('Date').sum().pct_change().std().Value for p in group]
mean_return_portfolio = sum(return_on_portfolio) / len(return_on_portfolio)
mean_vol_portfolio = sum(vol_on_portfolio) / len(vol_on_portfolio)
return_on_group[lists_names[i]] = (round(mean_return_portfolio, 4), round(mean_vol_portfolio, 4))
i += 1
    return pd.DataFrame(return_on_group, index=['Investment return', 'daily volatility'])
import pandas as pd
import instances.dinamizators.dinamizators as din
import math
def simplest_test():
'''
Test if the dinamizators are running
'''
df = (
pd.read_pickle('./instances/analysis/df_requests.zip')
.reset_index()
)
din.dinamize_as_berbeglia(df.pickup_location_x_coord,
df.pickup_location_y_coord,
df.delivery_location_x_coord,
df.delivery_location_y_coord,
df.pickup_upper_tw,
df.delivery_upper_tw,
df.pickup_service_time,
0.5,
60)
din.dinamize_as_pureza_laporte(df.depot_location_x,
df.depot_location_y,
df.pickup_location_x_coord,
df.pickup_location_y_coord,
df.pickup_lower_tw,
df.pickup_upper_tw,
0)
din.dinamize_as_pankratz(df.depot_location_x,
df.depot_location_y,
df.pickup_location_x_coord,
df.pickup_location_y_coord,
df.delivery_location_x_coord,
df.delivery_location_y_coord,
df.pickup_upper_tw,
df.delivery_upper_tw,
df.pickup_service_time,
0.5)
din.dinamize_as_fabri_recht(df.pickup_location_x_coord,
df.pickup_location_y_coord,
df.delivery_location_x_coord,
df.delivery_location_y_coord,
df.pickup_lower_tw,
df.delivery_upper_tw)
def test_calculate_travel_time():
pickup_location_x_coord = -1
pickup_location_y_coord = -1
delivery_location_x_coord = 1
delivery_location_y_coord = 1
expected_travel_time = math.ceil(math.sqrt(2) + math.sqrt(2))
calculated_travel_time = (
din.calculate_travel_time(
pickup_location_x_coord,
pickup_location_y_coord,
delivery_location_x_coord,
delivery_location_y_coord)
)
assert (expected_travel_time == calculated_travel_time)
def test_series_elementwise_max():
x = pd.Series([1, 2, 3])
y = pd.Series([3, 2, 1])
expected_max = pd.Series([3, 2, 3])
calculated_max = din.elementwise_max(x, y)
assert (expected_max == calculated_max).all()
def test_dataframe_elementwise_max():
x = pd.DataFrame([[1, 2, 3], [3, 2, 1]])
y = pd.DataFrame([[3, 2, 1], [1, 2, 3]])
expected_max = pd.DataFrame([[3, 2, 3], [3, 2, 3]])
calculated_max = din.elementwise_max(x, y)
assert (expected_max == calculated_max).all().all()
def test_series_elementwise_min():
x = pd.Series([1, 2, 3])
y = pd.Series([3, 2, 1])
expected_min = pd.Series([1, 2, 1])
calculated_min = din.elementwise_min(x, y)
assert (expected_min == calculated_min).all()
def test_dataframe_elementwise_min():
x = pd.DataFrame([[1, 2, 3], [3, 2, 1]])
y = pd.DataFrame([[3, 2, 1], [1, 2, 3]])
expected_min = pd.DataFrame([[1, 2, 1], [1, 2, 1]])
calculated_min = din.elementwise_min(x, y)
assert (expected_min == calculated_min).all().all()
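# Editor's note: the dinamizators implementation itself is not shown in this
# file. A minimal reference consistent with the element-wise tests above could
# look like the sketch below (an assumption, not the project's actual code).
def _reference_elementwise_max(x, y):
    """Element-wise maximum for equally shaped Series/DataFrames."""
    return x.where(x > y, y)
def _reference_elementwise_min(x, y):
    """Element-wise minimum for equally shaped Series/DataFrames."""
    return x.where(x < y, y)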
def test_dinamize_as_berbeglia():
pickup_location_x_coord = pd.Series([1])
pickup_location_y_coord = pd.Series([1])
delivery_location_x_coord = pd.Series([-1])
delivery_location_y_coord = pd.Series([-1])
pickup_upper_tw = pd.Series([10.0])
delivery_upper_tw = pd.Series([12.0])
pickup_service_time = pd.Series([1.0])
alpha = 0
beta = 1
    # expected arrival time using the Berbeglia dynamization equation
expected_arrival_time = pd.Series([7])
calculated_arrival_time = (
din.dinamize_as_berbeglia(
pickup_location_x_coord,
pickup_location_y_coord,
delivery_location_x_coord,
delivery_location_y_coord,
pickup_upper_tw,
delivery_upper_tw,
pickup_service_time,
alpha,
beta
)
)
assert (expected_arrival_time == calculated_arrival_time).all()
def test_dinamize_as_pureza_laporte():
depot_location_x = pd.Series([0])
depot_location_y = pd.Series([0])
pickup_location_x_coord = pd.Series([1])
pickup_location_y_coord = pd.Series([1])
pickup_lower_tw = pd.Series([2])
pickup_upper_tw = pd.Series([10])
beta = 1
    # expected arrival time using the Pureza and Laporte dynamization equation
expected_arrival_time = 2
calculated_arrival_time = (
din.dinamize_as_pureza_laporte(
depot_location_x,
depot_location_y,
pickup_location_x_coord,
pickup_location_y_coord,
pickup_lower_tw,
pickup_upper_tw,
beta
)
)
assert (expected_arrival_time == calculated_arrival_time).all()
def test_dinamize_as_pankratz():
depot_location_x = pd.Series([0])
depot_location_y = pd.Series([0])
pickup_location_x_coord = pd.Series([-1])
pickup_location_y_coord = pd.Series([-1])
delivery_location_x_coord = pd.Series([1])
delivery_location_y_coord = pd.Series([1])
pickup_upper_tw = pd.Series([10])
delivery_upper_tw = pd.Series([20])
pickup_service_time = pd.Series([1])
beta = 0.6
    # expected arrival time using the Pankratz dynamization equation, rounded up
    # to the next integer
expected_arrival_time = 5
calculated_arrival_time = (
din.dinamize_as_pankratz(
depot_location_x,
depot_location_y,
pickup_location_x_coord,
pickup_location_y_coord,
delivery_location_x_coord,
delivery_location_y_coord,
pickup_upper_tw,
delivery_upper_tw,
pickup_service_time,
beta
)
)
assert (expected_arrival_time == calculated_arrival_time).all()
def test_dinamize_as_fabri_recht():
pickup_location_x_coord = pd.Series([-1])
pickup_location_y_coord = pd.Series([-1])
    delivery_location_x_coord = pd.Series([1])
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Filename: ENN.py
# @Author: <NAME>
# @Time: 16/11/21 17:14
# @Version: 7.0
import numpy as np
import pandas as pd
from sklearn.neighbors import NearestNeighbors
from .utils import transform, transform_original_complete
class ENN:
"""
<NAME>. (1972). Asymptotic properties of nearest neighbor rules
using edited data. IEEE Transactions on Systems, Man, and
Cybernetics, (3), 408-421.
Parameters
----------
nearest_neighbors : int, default=3
Number to use as nearest neighbors when computing distances.
power_parameter : int, default=2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance (l2)
for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
"""
def __init__(self, nearest_neighbors=3, power_parameter=2):
"""
The function takes in two parameters, nearest_neighbors
and power_parameter, and assigns them to the attributes
nearest_neighbors and power_parameter
:param nearest_neighbors: The number of nearest neighbors to use when
calculating the weights, defaults to 3 (optional)
:param power_parameter: This is the exponent that is used to calculate
the weights, defaults to 2 (optional)
"""
self.nearest_neighbors = nearest_neighbors
self.power_parameter = power_parameter
self.x_attr = None
def _neighs(self, s_samples, s_targets, index, removed):
"""
The function takes in the samples and targets, the index of the
sample to be removed, and the number of samples already removed. It
returns the sample to be removed, its target, the targets of the
samples not yet removed, the samples not yet removed, and the
indices of the nearest neighbors of the sample to be removed.
:param s_samples: the samples that are being used to train the model
:param s_targets: the targets of the samples
:param index: the index of the sample to be removed
:param removed: the number of samples that have been removed from the
dataset
"""
x_sample = s_samples[index - removed]
x_target = s_targets[index - removed]
knn = NearestNeighbors(
            n_jobs=-1, n_neighbors=self.nearest_neighbors, p=self.power_parameter)
samples_not_x = s_samples[: index - removed] + \
s_samples[index - removed + 1:]
targets_not_x = s_targets[: index - removed] + \
s_targets[index - removed + 1:]
knn.fit(samples_not_x)
_, neigh_ind = knn.kneighbors([x_sample])
return x_sample, x_target, targets_not_x, samples_not_x, neigh_ind
def filter(self, samples, y):
"""
Implementation of the Wilson Editing algorithm.
For each sample locates the *k* nearest neighbors and selects the
number of different classes there are.
If a sample results in a wrong classification after being classified
with k-NN, that sample is removed from the TS.
:param samples: DataFrame.
:param y: DataFrame.
:return: the input dataset with the remaining samples.
"""
self.x_attr = samples.keys()
samples = transform(samples, y)
size = len(samples["data"])
s_samples = list(samples["data"])
s_targets = list(samples["target"])
removed = 0
for index in range(size):
_, x_target, targets_not_x, samples_not_x, neigh_ind = self._neighs(
s_samples, s_targets, index, removed
)
y_targets = np.ravel(
np.array([targets_not_x[x] for x in neigh_ind[0]])
).astype(int)
count = np.bincount(y_targets)
max_class = np.where(count == np.amax(count))[0][0]
if max_class != x_target:
removed += 1
s_samples = samples_not_x
s_targets = targets_not_x
samples = pd.DataFrame(s_samples, columns=self.x_attr)
y = pd.DataFrame(s_targets)
return samples, y
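    # Usage sketch (editor's addition; the variable names are hypothetical):
    #   enn = ENN(nearest_neighbors=3)
    #   X_filtered, y_filtered = enn.filter(X_train, y_train)
    # Samples whose k-NN majority vote disagrees with their label are dropped.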
def filter_original_complete(self, original, original_y, complete, complete_y):
"""
Modification of the Wilson Editing algorithm.
For each sample locates the *k* nearest neighbors and selects the number
of different classes there are.
If a sample results in a wrong classification after being classified
with k-NN, that sample is removed from the TS, only if the sample to be
removed is not from the original dataset.
:param original: DataFrame: dataset with the initial samples.
:param original_y: DataFrame: labels.
:param complete: DataFrame: dataset with the initial samples and the new
ones added by self-training.
:param complete_y: labels.
:return: the input dataset with the remaining samples.
"""
self.x_attr = original.keys()
original, complete = transform_original_complete(
original, original_y, complete, complete_y
)
size = len(complete["data"])
s_samples = list(complete["data"])
s_targets = list(complete["target"])
o_samples = list(original["data"])
removed = 0
for index in range(size):
x_sample, x_target, targets_not_x, samples_not_x, neigh_ind = self._neighs(
s_samples, s_targets, index, removed
)
y_targets = [targets_not_x[x] for x in neigh_ind[0]]
count = np.bincount(np.ravel(y_targets))
max_class = np.where(count == np.amax(count))[0][0]
if max_class != x_target:
delete = True
for o_sample in o_samples:
if np.array_equal(o_sample, x_sample):
delete = False
if delete:
removed += 1
s_samples = samples_not_x
s_targets = targets_not_x
        samples = pd.DataFrame(s_samples, columns=self.x_attr)
#!/usr/bin/env python3
# Accept S3 dir for reference fasta
# Accept Parent S3 dir for BAMs
# Accept uniquely identifiable sample group
# Accept s3 output location.
# For each genome:
# For each sample in this group:
# get coverage distribution
# write it to an appropriate s3 location
import concurrent.futures
import itertools
import logging
import boto3
import pandas as pd
import os
import sys
from pandas.io.parsers import read_csv
from tqdm import tqdm
from invaderCheck import genome_coverage_distribution_with_subsampling
from invaderCheck import compute_strain_difference
from invaderCheck import compare_distributions_wViz
def get_file_names(bucket_name, prefix, suffix="txt"):
"""
    Return a list of the file names in an S3 bucket folder.
    :param bucket_name: Name of the S3 bucket.
:param prefix: Only fetch keys that start with this prefix (folder name).
:param suffix: Only fetch keys that end with this suffix (extension).
"""
s3_client = boto3.client("s3")
response = s3_client.list_objects_v2(Bucket=bucket_name, Prefix=prefix)
try:
objs = response["Contents"]
except KeyError as ke:
logging.error(
f"Path with bucket '{bucket_name}' and prefix '{prefix}' does not exist!"
)
        raise KeyError(f"Path 's3://{bucket_name}/{prefix}' does not exist") from ke
while response["IsTruncated"]:
response = s3_client.list_objects_v2(
Bucket=bucket_name,
Prefix=prefix,
ContinuationToken=response["NextContinuationToken"],
)
objs.extend(response["Contents"])
logging.info(f"Sifting through {len(objs)} files ...")
shortlisted_files = list()
if suffix == "":
shortlisted_files = [obj["Key"] for obj in objs]
total_size_bytes = sum([obj["Size"] for obj in objs])
else:
shortlisted_files = [obj["Key"] for obj in objs if obj["Key"].endswith(suffix)]
total_size_bytes = sum(
[obj["Size"] for obj in objs if obj["Key"].endswith(suffix)]
)
logging.info(
f"Found {len(shortlisted_files)} files, totalling about {total_size_bytes/1e9:,.3f} Gb."
)
# return shortlisted_files
return [f"s3://{bucket_name}/{file_path}" for file_path in shortlisted_files]
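# Editor's illustrative sketch (not part of the original script): the bucket
# and prefix below are made up, shown only to illustrate the call shape.
def _example_list_bams():
    """Minimal usage sketch for get_file_names()."""
    return get_file_names("my-bucket", "project/samples/bowtie2", suffix="bam")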
def get_bam_file(s3uri, sample_name, subfolder="bowtie2", suffix="bam"):
bucket, file_prefix = declutter_s3paths(f"{s3uri}/{sample_name}/{subfolder}")
return get_file_names(bucket, file_prefix, suffix="bam")[0]
def create_comparison_df(df, challenged_on):
inoculum_query_samples = (
df.query("Location == 'ex_vivo'")
.reset_index(drop=True)
.rename(columns={"sample_id": "query_sample_id"})
)[["query_sample_id", "Week", "MouseNum", "MouseOrder"]]
inoculum_weeks = set(inoculum_query_samples["Week"])
base_samples = (
df.query("Week == @challenged_on")
.reset_index(drop=True)
.rename(columns={"sample_id": "base_sample_id"})
)[["base_sample_id", "Week", "MouseNum", "MouseOrder"]]
# I'll only be able to use the mice who have a base to compare
# pylint: disable=unused-variable
selected_mice = sorted(base_samples["MouseNum"].unique())
challenge_samples = (
df.dropna(subset=["Challenge"])
.query("MouseNum in @selected_mice")
.reset_index(drop=True)
.rename(columns={"sample_id": "query_sample_id"})
)[["query_sample_id", "Week", "MouseNum", "MouseOrder"]]
challenge_weeks = set(challenge_samples["Week"])
all_query_samples = pd.concat([challenge_samples, inoculum_query_samples])
compare_df = all_query_samples.merge(
right=base_samples, on=["MouseNum", "MouseOrder"], suffixes=("_query", "_base")
)
return compare_df, challenge_weeks, inoculum_weeks
def setup_experiment(
metadata, keep_locations=["ex_vivo", "gut"], challenged_on="Week4",
):
# metadata = sample_metadata
all_bam_paths_list = list()
# setup_experiment(sample_metadata, s3_bam_dir, challenged_on="Week4")
df = pd.read_csv(metadata, header=0).query("Location in @keep_locations")
df["bam_file"] = df.apply(
lambda x: get_bam_file(x.bam_location, x.sample_id), axis=1
)
all_bam_paths_list = sorted(df["bam_file"].unique())
comparison_df, challenge_weeks, inoculum_weeks = create_comparison_df(
df, challenged_on
)
comparisons = list()
for week in challenge_weeks:
if week in inoculum_weeks:
continue
# pylint: disable=unused-variable
comparison_target_weeks = {week} | inoculum_weeks
comparisons.append(
comparison_df.query("Week_query in @comparison_target_weeks")
.sort_values(["Week_query", "MouseOrder"], ascending=[False, True])
.reset_index(drop=True)[["base_sample_id", "query_sample_id", "Week_query"]]
)
return all_bam_paths_list, comparisons, challenge_weeks
def declutter_s3paths(s3uri):
s3path_as_list = s3uri.replace("s3://", "").rstrip("/").split("/")
bucket = s3path_as_list.pop(0)
prefix = "/".join(s3path_as_list)
return bucket, prefix
def download_from_s3(s3_uri, local_dir):
s3 = boto3.client("s3")
bucket, file_obj = declutter_s3paths(s3_uri)
local_file = f"{local_dir}/{os.path.basename(file_obj)}"
if not os.path.exists(local_file):
with open(local_file, "wb") as f:
s3.download_fileobj(bucket, file_obj, f)
return local_file
def upload_to_s3(s3_uri_dir, local_obj):
s3 = boto3.client("s3")
bucket, obj_dir = declutter_s3paths(s3_uri_dir)
file_name = os.path.basename(local_obj)
# with open(local_obj, "rb") as f:
# s3.upload_fileobj(f, bucket, f"{obj_dir}/{file_name}")
    s3.upload_file(local_obj, bucket, f"{obj_dir}/{file_name}")  # boto3 S3 client exposes upload_file directly
return
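# Editor's illustrative sketch (not part of the original script): the bucket
# and key names below are made up; the upload call is commented out because it
# would require live AWS credentials.
def _example_s3_path_handling():
    """Minimal usage sketch for declutter_s3paths() and upload_to_s3()."""
    bucket, prefix = declutter_s3paths("s3://my-bucket/project/run1/")
    # bucket == "my-bucket", prefix == "project/run1"
    # upload_to_s3("s3://my-bucket/project/run1/results", "results/summary.csv")
    return bucket, prefix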
def depth_vector_exists(genome, bam_file, min_qual, min_pid, min_paln):
genome_name = os.path.splitext(os.path.basename(genome))[0]
output_dir = f"{genome_name}_q{min_qual}_id{min_pid}_aln{min_paln}_vectors"
file_name = os.path.basename(bam_file).split("_vs_")[0].split(".")[0]
exp_vector_path = f"{output_dir}/{file_name}.q{min_qual}_id{min_pid}_aln{min_paln}.ref_depth.csv.gz"
return os.path.exists(exp_vector_path)
def get_coverage_distribution(
bam_s3_uri,
fasta_list,
local_tmp_dir,
min_qual=20,
min_pid=99,
min_paln=100,
subset_list=None,
):
if not bam_s3_uri.endswith("bam"):
return
bam_file = f"{local_tmp_dir}/{os.path.basename(bam_s3_uri)}"
bai_file = f"{bam_file}.bai"
logging.info(f"Calculating coverage distribution {bam_file} ...")
unprocessed_vectors = [
depth_vector_exists(genome, bam_file, min_qual, min_pid, min_paln)
for genome in fasta_list
]
# download bam file and index, if needed.
if not all(unprocessed_vectors):
logging.info(f"Downloading {bam_file} ...")
bam_file = download_from_s3(bam_s3_uri, local_tmp_dir)
bai_file = download_from_s3(f"{bam_s3_uri}.bai", local_tmp_dir)
# Get genome coverage for each genome in the bam file
depth_files_list = [
genome_coverage_distribution_with_subsampling.get_coverage_distribution(
bam_file,
fasta_file,
min_qual=min_qual,
min_pid=min_pid,
min_paln=min_paln,
subset_list=subset_list,
)
for fasta_file in fasta_list
]
# delete bam file and index
logging.info(f"Done processing {bam_file}.")
if os.path.exists(bam_file):
logging.info("Removing BAM files to save space.")
os.remove(bam_file)
os.remove(bai_file)
return pd.DataFrame(depth_files_list)
def get_comparison_df(comparison_week, organism_df, all_challenge_weeks):
# c = get_comparison_df(comparisons[0], organism_df)
# c.to_csv("AKM_compare.csv", index=False)
week_sets = set(comparison_week["Week_query"])
assert (
len(week_sets) == 2
), f"Comparison dataframe is malformed, contains the following sets: {week_sets}"
week = all_challenge_weeks & week_sets
df = (
comparison_week.merge(
how="left",
right=organism_df,
left_on="query_sample_id",
right_on="Sample_Name",
)
.drop(["Sample_Name", "Genome_Name"], axis=1)
.merge(
how="left",
right=organism_df,
left_on="base_sample_id",
right_on="Sample_Name",
suffixes=("_query", "_base"),
)
.drop(["Sample_Name", "Genome_Name"], axis=1)
.rename(columns={"Depth_Vector_query": "query", "Depth_Vector_base": "base"})
)
return week.pop(), df
def compute_depth_profiles(
sample_metadata, s3_fasta_dir, vector_paths_file, local_tmp_dir="TEMP"
):
# From the metadata file, get
# 1. a list of bam files
# 2. a list of df for each base week vs query week samples
all_bam_paths_list, comparisons, challenge_weeks = setup_experiment(sample_metadata)
# Download all genomes
os.makedirs(local_tmp_dir, exist_ok=True)
s3_fasta_bucket, s3_fasta_prefix = declutter_s3paths(s3_fasta_dir)
s3_fasta_suffix = "fna"
all_fasta_paths_list = get_file_names(
s3_fasta_bucket, s3_fasta_prefix, s3_fasta_suffix
)
logging.info(f"Downloading {len(all_fasta_paths_list)} Genomes")
local_fasta_files = [
download_from_s3(fasta_s3_uri, local_tmp_dir)
for fasta_s3_uri in tqdm(all_fasta_paths_list, ascii=True, desc="Genomes")
]
genome_names = [
os.path.splitext(os.path.basename(genome))[0] for genome in local_fasta_files
]
_ = [
os.makedirs(
f"{genome_name}_q{min_qual}_id{min_pid}_aln{min_paln}_vectors",
exist_ok=True,
)
for genome_name in genome_names
]
vector_paths = list()
with concurrent.futures.ProcessPoolExecutor(max_workers=max_cores) as executor:
future = [
executor.submit(
get_coverage_distribution,
bam_file,
local_fasta_files,
local_tmp_dir,
min_qual=min_qual,
min_pid=min_pid,
min_paln=min_paln,
subset_list=None,
)
for bam_file in all_bam_paths_list
]
for f in tqdm(
concurrent.futures.as_completed(future),
ascii=True,
desc="Genome Coverage Distribution",
):
vector_paths.append(f.result())
vector_paths_df = pd.concat(vector_paths)
# logging.info(vector_paths_df.shape)
# logging.info(f"\n{vector_paths_df.head()}")
vector_paths_df.to_csv(vector_paths_file, index=False)
## Upload vectors to S3
# with concurrent.futures.ProcessPoolExecutor(max_workers=max_cores) as executor:
# future = [
# executor.submit(
# upload_to_s3,
# f"{s3_vector_output_dir}/{row.Genome_Name}",
# row.Depth_Vector,
# )
# for row in vector_paths_df.itertuples()
# ]
# for f in tqdm(concurrent.futures.as_completed(future), ascii=True, desc="Uploading depth profiles to S3"):
# _ = f.result()
return genome_names, comparisons, challenge_weeks, vector_paths_df
def compute_differences_per_genome_per_week(week, genome, compare_df, cores, plot=True):
num_comparisons, _ = compare_df.shape
logging.info(
f"\n*** Computing {num_comparisons} difference vectors for {genome} from reference week to {week} ***\n"
)
cores_per_job = min(cores, num_comparisons)
strain_week_df = compute_strain_difference.compute_differences(
week, genome, compare_df, cores=cores_per_job, plotting=plot
)
strain_week_df["Organism"] = genome
strain_week_df["QueryWeek"] = week
return strain_week_df
def parallelize_compute_differences(
genome_names,
comparisons,
challenge_weeks,
vector_paths_df,
weekly_differences_filepath,
cores,
plot,
):
# compute strain differences
comparison_parameters = list()
for genome in genome_names:
organism_df = vector_paths_df.query("Genome_Name == @genome")
for comparison_week_df in comparisons:
# compare_df has 2 columns, 1 = "base" vector path (top), 2 = "query" vector path (bottom) in Mouse order
challenge_week, compare_df = get_comparison_df(
comparison_week_df, organism_df, challenge_weeks
)
comparison_parameters.append((challenge_week, genome, compare_df))
weekly_diffs_df_list = list()
num_parallel_jobs = 2
with concurrent.futures.ProcessPoolExecutor(
max_workers=num_parallel_jobs
) as executor:
future = [
executor.submit(
compute_differences_per_genome_per_week,
week,
genome,
compare_df,
cores=int(cores / num_parallel_jobs),
)
for week, genome, compare_df in comparison_parameters
]
for f in tqdm(
concurrent.futures.as_completed(future), ascii=True, desc="Compute Differences",
):
weekly_diffs_df_list.append(f.result())
weekly_differences_df = pd.concat(weekly_diffs_df_list)
weekly_differences_df.to_csv(weekly_differences_filepath, index=False)
return weekly_differences_df
def get_invader_check_predictions(
week,
genome,
intermediate_files_metadata_df,
control_term="PBS",
method="majority",
cutoff=0.002,
cores=10,
min_extreme_pos=2,
):
df = intermediate_files_metadata_df.query(
"(Organism == @genome) and (QueryWeek == @week)"
).reset_index(drop=True)
output_folder_path = os.path.join(genome, week)
# *.npy (column: diff_array_path) for control mice
control_profiles = list(df.query("Challenge == @control_term")["diff_array_path"])
# *.npy (column: diff_array_path) for challenged mice
sample_profiles = list(df.query("Challenge != @control_term")["diff_array_path"])
# *.ref_depth.csv.gz (column: query) for all mice
init_profiles_df = df.rename(
columns={"query_sample_id": "Query", "query": "S3Path"}
)[["Query", "S3Path"]]
comparison_df_filepath = compare_distributions_wViz.run(
output_folder_path,
genome,
method,
control_profiles,
sample_profiles,
init_profiles_df,
cutoff=cutoff,
cores=cores,
invader_extreme_pos=min_extreme_pos,
)
df["Comparison_Stats"] = comparison_df_filepath
return df
def detect_invaders(
challenge_weeks,
genome_names,
intermediate_files_metadata_df,
ic_pred_df_file,
cores,
control_term="PBS",
method="majority",
cutoff=0.002,
min_extreme_pos=2,
):
num_parallel_jobs = 2
ic_pred_df_list = list()
with concurrent.futures.ProcessPoolExecutor(
max_workers=num_parallel_jobs
) as executor:
future = [
executor.submit(
get_invader_check_predictions,
week,
genome,
intermediate_files_metadata_df,
cores=int(cores / num_parallel_jobs),
control_term=control_term,
method=method,
cutoff=cutoff,
min_extreme_pos=min_extreme_pos,
)
for week, genome in itertools.product(challenge_weeks, genome_names)
]
for f in tqdm(
concurrent.futures.as_completed(future), ascii=True, desc="Compare Differences",
):
ic_pred_df_list.append(f.result())
    ic_pred_df = pd.concat(ic_pred_df_list)
# -*- coding: utf-8 -*-
"""
Authors: <NAME>, <NAME>, <NAME>, and
<NAME>
IHE Delft 2017
Contact: <EMAIL>
Repository: https://github.com/gespinoza/hants
Module: hants
"""
from __future__ import division
import netCDF4
import pandas as pd
import numpy as np
import math
from .davgis.functions import (Spatial_Reference, List_Datasets, Clip,
Resample, Raster_to_Array, NetCDF_to_Raster)
import os
import tempfile
from copy import deepcopy
import matplotlib.pyplot as plt
import warnings
def run_HANTS(rasters_path_inp, name_format,
start_date, end_date, latlim, lonlim, cellsize, nc_path,
nb, nf, HiLo, low, high, fet, dod, delta,
epsg=4326, fill_val=-9999.0,
rasters_path_out=None, export_hants_only=False):
'''
    This function runs the python implementation of the HANTS algorithm. It
    takes a folder with geotiff raster data as input, creates a netCDF
    file, and optionally exports the data back to geotiffs.
'''
create_netcdf(rasters_path_inp, name_format, start_date, end_date,
latlim, lonlim, cellsize, nc_path,
epsg, fill_val)
HANTS_netcdf(nc_path, nb, nf, HiLo, low, high, fet, dod, delta,
fill_val)
#if rasters_path_out:
#export_tiffs(rasters_path_out, nc_path, name_format, export_hants_only)
return nc_path
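# Example call, shown only as a sketch: every path, extent and HANTS parameter
# value below is an assumption for illustration, not a requirement of this module.
#
#   nc_file = run_HANTS(rasters_path_inp='/data/ndvi_tifs', name_format='ndvi_{0}.tif',
#                       start_date='2016-01-01', end_date='2016-12-31',
#                       latlim=[-10.0, 10.0], lonlim=[30.0, 50.0], cellsize=0.01,
#                       nc_path='/data/ndvi_hants.nc',
#                       nb=365, nf=3, HiLo='Lo', low=-0.3, high=1.0,
#                       fet=0.05, dod=1, delta=0.25)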
def create_netcdf(rasters_path, name_format, start_date, end_date,
latlim, lonlim, cellsize, nc_path,
epsg=4326, fill_val=-9999.0):
'''
    This function creates a netCDF file from a folder of geotiff rasters to
    be used to run HANTS.
'''
# Latitude and longitude
    lat_ls = np.arange(latlim[0] + 0.5*cellsize, latlim[1] + 0.5*cellsize,
                       cellsize)
lat_ls = lat_ls[::-1] # ArcGIS numpy
    lon_ls = np.arange(lonlim[0] + 0.5*cellsize, lonlim[1] + 0.5*cellsize,
                       cellsize)
lat_n = len(lat_ls)
lon_n = len(lon_ls)
spa_ref = Spatial_Reference(epsg)
ll_corner = [lonlim[0], latlim[0]]
# Rasters
dates_dt = | pd.date_range(start_date, end_date, freq='D') | pandas.date_range |
import pandas as pd
import re
import sys, os
import requests
from bs4 import BeautifulSoup
import boto3
from PremierLeague import PremierLeague
from botocore.exceptions import ClientError
import logging
COLUMNS_TO_KEEP = ['HomeTeam', 'AwayTeam', 'FTHG', 'FTAG', 'HST', 'AST', 'HC', 'AC',
'B365H', 'B365D', 'B365A', 'B365>2.5', 'B365<2.5', 'year']
class UpdateDataset():
def __init__(self, year, bucketName, url, prefixUrl, datasetFilename, teamsGamesFilename, statisticsFilename, ratingsFilename):
self._year = year
self._s3 = boto3.client('s3')
self._bucketName = bucketName
self._url = url
self._prefixUrl = prefixUrl
self._datasetPath = datasetFilename
        self._teamsGamesPath = teamsGamesFilename
self._statisticsPath = statisticsFilename
self._ratingsPath = ratingsFilename
self._logger = logging.getLogger()
self._logger.setLevel(logging.INFO)
def update_dataframes_in_s3bucket(self):
try:
dfEplGames = self.get_current_dataset() # scrapper
self._logger.info('Successfully extracted the dataset from {}.'.format(self._url))
except Exception as e:
self._logger.error('Fatal error while trying to extract the dataset from {}.'.format(self._url))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            self._logger.error('%s %s %s', exc_type, fname, exc_tb.tb_lineno)
dataframes = {}
for path in [self._datasetPath, self._teamsGamesPath, self._ratingsPath, self._statisticsPath]:
try:
dataframes[path] = self.get_dataframe_in_s3bucket(path)
self._logger.info('Successfully downloaded {} from S3 bucket {}.'.format(self._datasetPath, self._bucketName))
except Exception as e:
self._logger.error('Fatal error while trying to download {} dataset from S3 bucket {}.'.format(self._datasetPath, self._bucketName))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
                self._logger.error('%s %s %s', exc_type, fname, exc_tb.tb_lineno)
try:
firstGameId = self.extract_first_game_id(dataframes[self._datasetPath]) # extract_first_game_id
premierLeague = PremierLeague(False, dataframes[self._ratingsPath], 4, self._year, firstGameId)
newDataset = premierLeague.get_dataset_update(dfEplGames)
            dfDataset = self.update_dataset(dataframes[self._datasetPath], newDataset) # merge datasets
self.upload_dataset(dfDataset, 'dataset')
            newTeamsGames = premierLeague.get_data_teams(self._year) # merge teamsGames
dfTeamsGames = self.update_games(dataframes[self._teamsGamesPath], newTeamsGames)
self.upload_dataset(dfTeamsGames, 'teamsGames')
dfStatistics = self.update_statistics(dataframes[self._statisticsPath], dfEplGames) # OK
self.upload_dataset(dfStatistics, 'statistics')
except Exception as e:
self._logger.error('Fatal error while trying to update dataset.')
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            self._logger.error('%s %s %s', exc_type, fname, exc_tb.tb_lineno)
def get_current_dataset(self):
return self.scrapper()
def scrapper(self):
page = requests.get(self._url)
soup = BeautifulSoup(page.content, "html.parser")
        regExpression = r'[a-zA-Z]+[0-9]+/[0-9]+/[a-zA-Z]0\.[a-zA-Z]+'
for link in soup.find_all('a', href=True):
z = re.match(regExpression, link['href'])
if z:
dataLink = link['href']
break
try:
return pd.read_csv(self._prefixUrl+dataLink)
        except Exception:
return False
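    # For reference, the regular expression in scrapper() is meant to pick out the
    # season data link on the page. A path shaped like 'mmz4281/2122/E0.csv' would
    # match (the concrete path is an assumption about the site layout, used here
    # only to illustrate the pattern):
    #
    #   import re
    #   re.match(r'[a-zA-Z]+[0-9]+/[0-9]+/[a-zA-Z]0\.[a-zA-Z]+', 'mmz4281/2122/E0.csv')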
def get_dataframe_in_s3bucket(self, dataframeName):
response = self._s3.get_object(Bucket=self._bucketName, Key=dataframeName)
status = response.get("ResponseMetadata", {}).get("HTTPStatusCode")
if status == 200:
df = pd.read_csv(response.get("Body"))
return df
else:
return False
def extract_first_game_id(self, df):
        # First extract the 'id' and 'year' columns
        # Then keep only the rows for the current season (self._year)
        # Look for the minimum value of the 'id' column
        # -41 -> because each team's first games are not taken into account
        # (exact offset still to be double-checked)
return df[['id', 'year']][df['year'] == self._year]['id'].min(axis=0)-41
def update_dataset(self, df, dfCur):
df = df[df['year'] != 2022] # delete all 2022 games
# Add column year
dfCur['year'] = [self._year for i in range(dfCur.shape[0])]
# Merge those datasets
frames = [df, dfCur]
return pd.concat(frames).sort_values(by='id', ascending=True)
def update_games(self, df, dfCur):
frames = [df, dfCur]
return | pd.concat(frames) | pandas.concat |
"""
"""
import base64
import io
import pathlib
from zipfile import ZipFile
import dash
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import wiutils
from dash.dependencies import Output, Input, State
from scipy.stats.kde import gaussian_kde
def _plot_accumulation_curve(images, deployments):
images = images.copy()
images["timestamp"] = pd.to_datetime(images["timestamp"])
images["date"] = pd.to_datetime(images["timestamp"].dt.date)
start = pd.to_datetime(deployments["start_date"]).min()
end = pd.to_datetime(deployments["end_date"]).max()
date_range = pd.date_range(start, end, freq="D")
df = pd.DataFrame(
{"date": date_range, "day": np.arange(date_range.size), "richness": 0}
)
for i, row in df.iterrows():
date = row["date"]
subdf = images[images["date"] <= date]
richness = subdf["scientific_name"].unique().size
row["richness"] = richness
df.loc[i] = row
fig = px.line(
df, x="day", y="richness", labels={"day": "Día", "richness": "Riqueza"}
)
return fig
def _plot_site_dates(deployments):
deployments = deployments.copy()
deployments["start_date"] = pd.to_datetime(deployments["start_date"])
deployments["end_date"] = pd.to_datetime(deployments["end_date"])
df = pd.melt(
deployments, id_vars="deployment_id", value_vars=["start_date", "end_date"]
)
df = df.sort_values(["value"], ascending=True)
fig = px.line(
df,
x="value",
y="deployment_id",
color="deployment_id",
color_discrete_sequence=["#636EFA"],
labels={"value": "Fecha", "deployment_id": "Evento"},
)
fig.update_layout(showlegend=False)
return fig
def _plot_activity_hours(images, names):
images = images.copy()
images["hour"] = pd.to_datetime(images["timestamp"]).dt.round("H").dt.hour
df = pd.DataFrame(columns=["x", "y", "name"])
x_range = np.linspace(0, 24, 1000)
for name in names:
hours = images[images["scientific_name"] == name]["hour"].to_numpy()
if np.unique(hours).size > 1:
kde = gaussian_kde(hours)
df = df.append(
pd.DataFrame(
{"x": x_range, "y": kde(x_range), "name": [name] * x_range.size}
),
ignore_index=True,
)
if not df.empty:
fig = px.line(
df, x="x", y="y", color="name", labels={"x": "Hora", "y": "Densidad"}
)
ticks = [i for i in range(0, 25) if i % 2 == 0]
fig.update_layout(
xaxis=dict(
tickmode="array",
tickvals=ticks,
ticktext=list(map(lambda x: f"{str(x).zfill(2)}:00", ticks)),
),
legend_title_text="",
legend=dict(orientation="h", yanchor="bottom", y=1.02, xanchor="right", x=1),
)
else:
fig = go.Figure()
return fig
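# The density curves above come from scipy's gaussian_kde evaluated on a fixed
# 0-24 h grid. A standalone sketch of that step, with made-up hour values used
# purely for illustration:
#
#   import numpy as np
#   from scipy.stats.kde import gaussian_kde
#   hours = np.array([1, 2, 2, 3, 14, 15, 15, 16])
#   kde = gaussian_kde(hours)
#   density = kde(np.linspace(0, 24, 1000))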
def _plot_presence_absence(images, deployments, name):
images = images.copy()
images = images[images["scientific_name"] == name]
images["timestamp"] = pd.to_datetime(images["timestamp"])
images["date"] = pd.to_datetime(images["timestamp"].dt.date)
start = pd.to_datetime(deployments["start_date"]).min()
end = pd.to_datetime(deployments["end_date"]).max()
date_range = pd.date_range(start, end, freq="D")
deployment_ids = sorted(deployments["deployment_id"].unique())
df = pd.DataFrame(index=deployment_ids, columns=np.arange(date_range.size))
for id_ in deployment_ids:
subdf = images[images["deployment_id"] == id_]
if subdf.empty:
df.loc[id_] = 0
df.loc[id_] = date_range.isin(subdf["date"]).astype(int)
fig = px.imshow(df, labels={"x": "Día", "y": "Evento", "color": "Presencia"})
return fig
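# Example of how the presence/absence heatmap above would be requested (the
# species name is an assumed placeholder; images_df and deployments_df are the
# Wildlife Insights tables loaded by the app): rows are deployment events,
# columns are days since the earliest start date, and a cell is 1 when the
# species was recorded at that event on that day.
#
#   fig = _plot_presence_absence(images_df, deployments_df, "Cuniculus paca")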
def generate_callbacks(app):
"""
"""
@app.callback(
Output("store", "data"),
Output("project-name", "children"),
Output("project-sites", "children"),
Output("project-images", "children"),
Output("fig-species-list-1", "options"),
Output("fig-species-list-2", "options"),
Output("data-check", "className"),
Output("data-check-tooltip", "style"),
Input("upload", "contents"),
State("upload", "filename"),
State("remove-duplicates", "value"),
State("remove-duplicates-interval", "value"),
)
def store_project(content, name, remove_duplicates, remove_duplicates_interval):
if content is not None:
string = content.split(",")[1]
decoded = base64.b64decode(string)
z = ZipFile(io.BytesIO(decoded))
stem = pathlib.Path(name).stem
try:
images = pd.read_csv(io.BytesIO(z.read(f"{stem}/images.csv")))
images = wiutils.remove_unidentified(images, rank="genus")
images["scientific_name"] = wiutils.get_scientific_name(
images, keep_genus=True, add_qualifier=True
)
if remove_duplicates and remove_duplicates_interval:
images = wiutils.remove_duplicates(
images, interval=remove_duplicates_interval, unit="minutes"
)
deployments = pd.read_csv(io.BytesIO(z.read(f"{stem}/deployments.csv")))
projects = pd.read_csv(io.BytesIO(z.read(f"{stem}/projects.csv")))
except KeyError:
return None, "", "", "", [], [], "fas fa-times-circle", {"display": "float"}
reference = pd.read_csv(
pathlib.Path(__file__)
.parents[1]
.joinpath("assets/reference.csv")
.as_posix()
)
data = {
"images": images.to_json(orient="split"),
"deployments": deployments.to_json(orient="split"),
"projects": projects.to_json(orient="split"),
"reference": reference.to_json(orient="split"),
}
name = projects.loc[0, "project_name"]
sites = pd.read_json(data["deployments"], orient="split").shape[0]
nimages = images.shape[0]
options = [
{"label": name, "value": name}
for name in images["scientific_name"].dropna().sort_values().unique()
]
return data, name, sites, nimages, options, options, "fas fa-check-circle", {"display": "none"}
else:
return None, "", "", "", [], [], "", {}
@app.callback(
Output("data-table", "columns"),
Output("data-table", "data"),
Output("data-table-wrapper", "style"),
Output("graph", "figure"),
Output("graph-wrapper", "style"),
Input("general-count", "n_clicks"),
Input("dwc-events", "n_clicks"),
Input("dwc-records", "n_clicks"),
Input("deployment-detection", "n_clicks"),
Input("detection-history", "n_clicks"),
Input("hill-numbers", "n_clicks"),
Input("deployment-summary", "n_clicks"),
Input("accumulation-curve", "n_clicks"),
Input("site-dates", "n_clicks"),
Input("activity-hours", "n_clicks"),
Input("presence-absence", "n_clicks"),
State("store", "data"),
State("general-count-add-taxonomy", "value"),
State("general-count-threat-status", "value"),
State("general-count-endemic", "value"),
State("dwc-lang", "value"),
State("deployment-detection-compute-abundance", "value"),
State("deployment-detection-pivot", "value"),
State("detection-history-days", "value"),
State("detection-history-date-range", "value"),
State("detection-history-compute-abundance", "value"),
State("detection-history-pivot", "value"),
State("hill-numbers-q", "value"),
State("hill-numbers-pivot", "value"),
State("fig-species-list-1", "value"),
State("fig-species-list-2", "value"),
)
def execute(
btn1,
btn2,
btn3,
btn4,
btn5,
btn6,
btn7,
btn8,
btn9,
btn10,
btn11,
data,
general_count_add_taxonomy,
general_count_threat_status,
general_count_endemic,
dwc_lang,
deployment_detection_compute_abundance,
deployment_detection_pivot,
detection_history_days,
detection_history_date_range,
detection_history_compute_abundance,
detection_history_pivot,
hill_numbers_q,
hill_numbers_pivot,
names,
name,
):
ctx = dash.callback_context
if not ctx.triggered:
return None, None, {}, {}, {}
else:
id_ = ctx.triggered[0]["prop_id"].split(".")[0]
images = pd.read_json(data["images"], orient="split")
deployments = | pd.read_json(data["deployments"], orient="split") | pandas.read_json |
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import numpy as np
import csv
import pandas as pd
from matplotlib import cm
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
MINIMUM = -5
MAXIMUM = 5
OUTPUT_FILENAME = "addition.csv"
HEADER = ["x", "y", "x+y"]
def main():
print("Hello, Addition!")
data = []
for x in range(MINIMUM, MAXIMUM+1):
for y in range(MINIMUM, MAXIMUM+1):
data.append((x, y, x+y))
print("Entries:", len(data))
# [print(entry) for entry in data]
# with open(OUTPUT_FILENAME, "w", newline="") as csv_file:
# writer = csv.writer(csv_file, delimiter=",")
# writer.writerow(HEADER)
# for entry in data:
# writer.writerow(entry)
x = [entry[0] for entry in data]
y = [entry[1] for entry in data]
z = [entry[2] for entry in data]
# data = {'Unemployment_Rate': [6.1,5.8,5.7,5.7,5.8,5.6,5.5,5.3,5.2,5.2],
# 'Stock_Index_Price': [1500,1520,1525,1523,1515,1540,1545,1560,1555,1565]
# }
# df = pd.DataFrame(data,columns=['Unemployment_Rate','Stock_Index_Price'])
df = | pd.DataFrame({"x": x, "y": y, "z": z}) | pandas.DataFrame |
import pandas as pd
def preprocess_repo2(df,asset,Trading_env):
df = df.to_frame()
index = df.index.strftime('%Y-%m-%d')
    index = [s.replace("-", "") for s in index]
liste = []
for i in index:
liste.append(i)
liste.append(i)
df_context = Trading_env.preprocess_context_data(asset)
df['gold'] = df_context.iloc[:,0]
df['interest'] = df_context.iloc[:,1]
df['index'] = df_context.iloc[:,2]
df['similar'] = df_context.iloc[:,3]
df['vix'] = df_context.iloc[:,4]
columns = df.columns.values.tolist()
columns[0] = 'adjcp'
df.columns = columns
#df = df.rename(columns={'AAPL': 'adjcp'})
df = df.reset_index(drop=True)
df.index = range(0, len(df) * 2, 2)
for i in range(1, len(df) * 2, 2):
line = pd.DataFrame({"gold": 1, "interest": 1, "index": 1, "similar": 1, 'adjcp': 1, 'vix':1}, index=[i])
df = df.append(line, ignore_index=False)
df = df.sort_index().reset_index(drop=True)
df["datadate"] = liste
liste = []
for i in range(0, len(df) // 2):
liste.append(i)
liste.append(i)
df.index = liste
df["datadate"] = pd.to_numeric(df["datadate"])
fold1 = df[(df.datadate > 20100103) & (df.datadate <= 20161231)]
fold2 = df[(df.datadate > 20170101) & (df.datadate <= 20171231)]
fold3 = df[(df.datadate > 20180101) & (df.datadate <= 20181231)]
fold4 = df[(df.datadate > 20190101) & (df.datadate <= 20191231)]
ind1, ind2, ind3 = [], [], []
longerfold = fold1.append(fold2)
for i in range(0, len(fold1) // 2):
ind1.append(i)
ind1.append(i)
for i in range(0, len(fold2) // 2):
ind2.append(i)
ind2.append(i)
for i in range(0, len(longerfold) // 2):
ind3.append(i)
ind3.append(i)
fold1.index = ind1
fold2.index = ind2
try:
fold3.index = ind2[:len(fold3.index)]
fold4.index = ind2[:len(fold4.index)]
except ValueError:
fold3.index = ind2[:len(fold3.index)]+[len(fold2) // 2 +2,len(fold2) // 2+2,len(fold2) // 2 +3,len(fold2) // 2+3]
longerfold.index = ind3
return [[fold1, fold2, fold3], [longerfold, fold3, fold4]]
def merge_folds_test(fold1,fold2):
longerfold = fold1.append(fold2)
ind3 = []
for i in range(0, len(longerfold) // 2):
ind3.append(i)
ind3.append(i)
longerfold.index = ind3
return longerfold
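# Usage sketch (names are illustrative): given the folds produced by
# preprocess_repo2, merge_folds_test stitches two of them into one frame and
# rebuilds the doubled index (0, 0, 1, 1, ...) that the trading environment expects.
#
#   folds_cv, folds_final = preprocess_repo2(price_series, asset, Trading_env)
#   test_fold = merge_folds_test(folds_cv[1], folds_cv[2])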
def preprocess_repo2_corona(df,asset,Trading_env):
df = df.to_frame()
index = df.index.strftime('%Y-%m-%d')
    index = [s.replace("-", "") for s in index]
liste = []
for i in index:
liste.append(i)
liste.append(i)
df_context = Trading_env.preprocess_context_data(asset, True)
df['gold'] = df_context.iloc[:,0]
df['interest'] = df_context.iloc[:,1]
df['index'] = df_context.iloc[:,2]
df['similar'] = df_context.iloc[:,3]
df['vix'] = df_context.iloc[:,4]
columns = df.columns.values.tolist()
columns[0] = 'adjcp'
df.columns = columns
#df = df.rename(columns={'AAPL': 'adjcp'})
df = df.reset_index(drop=True)
df.index = range(0, len(df) * 2, 2)
for i in range(1, len(df) * 2, 2):
line = pd.DataFrame({"gold": 1, "interest": 1, "index": 1, "similar": 1, 'adjcp': 1, 'vix':1}, index=[i])
df = df.append(line, ignore_index=False)
df = df.sort_index().reset_index(drop=True)
df["datadate"] = liste
liste = []
for i in range(0, len(df) // 2):
liste.append(i)
liste.append(i)
df.index = liste
df["datadate"] = | pd.to_numeric(df["datadate"]) | pandas.to_numeric |
#!/usr/bin/env python
ARTICLE_URL_FILE = 'harpers-later-urls.json'
OUTPUT_FILE = 'harpers-pdf-links.json'
WORKER_THREADS = 16
import json
import datetime
import dateutil.parser
import sys
from dataclasses import dataclass, field
from dataclasses_json import dataclass_json
from datetime import datetime
from newspaper import Article, Config
from bs4 import BeautifulSoup
from typing import List
from queue import Queue
from threading import Thread
from pathlib import Path
import pandas as pd
import re
import time
from urllib.request import Request, urlopen
@dataclass_json
@dataclass
class HarpersPDF:
title: str = ''
url: str = ''
link: str = ''
publication: str = 'harpers'
model_publication: str = 'target'
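# Because HarpersPDF is decorated with @dataclass_json, each record serialises
# straight to JSON; a small illustrative example (the field values are
# placeholders, not real article data):
#
#   pdf = HarpersPDF(title='Some essay', url='https://harpers.org/archive/...',
#                    link='https://harpers.org/wp-content/uploads/example.pdf')
#   as_json = pdf.to_json()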
class WriteThread(Thread):
def __init__(self, queue: Queue, *args, **kwargs):
super().__init__(*args, **kwargs)
self.queue = queue
def run(self):
output_file_path = Path(OUTPUT_FILE)
if output_file_path.is_file():
with open(OUTPUT_FILE, 'r') as output_file:
existing_info = json.loads(output_file.read())
else:
existing_info = []
i = 0
while True:
current_pdf = self.queue.get()
if current_pdf is None:
check_df = | pd.DataFrame(existing_info) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'maxim'
try:
from itertools import izip as zip
except ImportError: # python 3.x
pass
from itertools import count
import os
import numpy as np
import pandas as pd
from train.job_info import parse_model_infos
from util import *
from .model_io import get_model_info
class Ensemble(object):
def __init__(self, models):
self._models = models
def predict_aggregated(self, df, last_rows=None, reducer=np.mean):
debug('Models=%d last_rows=%d' % (len(self._models), last_rows))
changes = [Ensemble.predict_changes_for_model(model_info, df, last_rows) for model_info in self._models]
changes = np.array(changes)
vlog('Predicted changes:', changes.shape)
vlog2('Predicted values:\n', changes[:, :6])
return reducer(changes, axis=0)
@staticmethod
def predict_changes_for_model(model_info, df, last_rows=None):
run_params = model_info.run_params
model = model_info.model_class(**model_info.model_params)
x = to_dataset_for_prediction(df, run_params['k'], model_info.model_class.DATA_WITH_BIAS)
if last_rows is None:
assert run_params['k'] <= 100, 'One of the models is using k=%d. Set last rows manually' % run_params['k']
last_rows = df.shape[0] - 100 # 100 is max k
assert last_rows <= x.shape[0], 'Last rows is too large. Actual rows: %d' % x.shape[0]
x = x[-last_rows:] # take only the last `last_rows` rows
vlog('Input for prediction:', x.shape)
with model.session():
model.restore(model_info.path)
predicted_changes = model.predict(x)
vlog('Predicted:', predicted_changes.shape, ' for model:', model_info.path)
vlog2('Predicted values:', predicted_changes[:20])
return predicted_changes
@staticmethod
def ensemble_top_models(job_info, top_n=5):
home_dir = os.path.join(job_info.zoo_dir, '%s_%s' % (job_info.ticker, job_info.period))
models = parse_model_infos(home_dir)
models.sort(key=lambda d: d['eval'])
model_paths = [os.path.join(home_dir, d['name']) for d in models]
models = [get_model_info(path, strict=False) for path in model_paths]
top_models = [model for model in models if model.is_available()][:top_n]
return Ensemble(top_models)
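# Typical call site, as a sketch -- `job_info` and `df` are assumed to exist and
# the numbers are illustrative: pick the five best-scoring saved models for a
# ticker and average their predicted %-changes over the last 100 rows.
#
#   ensemble = Ensemble.ensemble_top_models(job_info, top_n=5)
#   changes = ensemble.predict_aggregated(df, last_rows=100)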
def predict_multiple(job_info, raw_df, rows_to_predict, top_models_num=5):
debug('Predicting %s target=%s' % (job_info.name, job_info.target))
raw_targets = raw_df[job_info.target][-(rows_to_predict + 1):].reset_index(drop=True)
changes_df = to_changes(raw_df)
target_changes = changes_df[job_info.target][-rows_to_predict:].reset_index(drop=True)
dates = changes_df.date[-rows_to_predict:].reset_index(drop=True)
df = changes_df[:-1] # the data for models is shifted by one: the target for the last row is unknown
ensemble = Ensemble.ensemble_top_models(job_info, top_n=top_models_num)
predictions = ensemble.predict_aggregated(df, last_rows=rows_to_predict)
result = []
for idx, date, prediction_change, target_change in zip(count(), dates, predictions, target_changes):
debug('%%-change on %s: predict=%+.5f target=%+.5f' % (date, prediction_change, target_change))
# target_change is approx. raw_targets[idx + 1] / raw_targets[idx] - 1.0
raw_target = raw_targets[idx + 1]
raw_predicted = (1 + prediction_change) * raw_targets[idx]
debug(' value on %s: predict= %.5f target= %.5f' % (date, raw_predicted, raw_target))
result.append({'Time': date, 'Prediction': raw_predicted, 'True': raw_target})
result_df = | pd.DataFrame(result) | pandas.DataFrame |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Acquires "real" datasets from scikit-learn.
Obtains 20 Newsgroups, Sentiment Labelled Sentences, and MNIST.
"""
import csv
import pickle
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.utils import shuffle as shuffle_arrays
import tensorflow.compat.v1 as tf
FILE_PATH = 'trees/raw_data'
RANDOM_STATE = 109971161161043253 % 8085
class TwentyNewsgroups(object):
"""The 20 Newsgroups text classification dataset.
Very informative text (headers, footers, and quotes) has been removed to make
the classification problem more challenging.
"""
def __init__(self, vectorizer='tf-idf'):
"""Initialize the dataset.
Args:
vectorizer: str text vectorization method; values = 'bow' (bag of words),
'binary-bow' (binary bag of words), 'tf-idf'
"""
self.vectorizer = vectorizer
def get(self):
"""Gets the 20 Newsgroups dataset (sparse).
Returns:
x_train: scipy.sparse.*matrix
array of features of training data
y_train: np.array
1-D array of class labels of training data
x_test: scipy.sparse.*matrix
array of features of test data
y_test: np.array
1-D array of class labels of the test data
"""
# fix doesn't work
data_path = '{}/{}'.format(FILE_PATH, '20news')
with tf.gfile.GFile('{}/{}'.format(data_path, 'train.pkl'), 'r') as f:
train = pickle.load(f)
with tf.gfile.GFile('{}/{}'.format(data_path, 'test.pkl'), 'r') as f:
test = pickle.load(f)
x_train = train.data
y_train = train.target
x_test = test.data
y_test = test.target
x_train, x_test = vectorize_text(x_train, x_test, method=self.vectorizer)
return x_train, y_train, x_test, y_test
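# Usage sketch (assumes the pickled train/test splits already exist under
# FILE_PATH; variable names are illustrative):
#
#   x_train, y_train, x_test, y_test = TwentyNewsgroups(vectorizer='tf-idf').get()
#   print(x_train.shape)  # sparse document-term matrix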
class SentimentSentences(object):
"""The Sentiment Labelled Sentences text classification dataset."""
def __init__(self, vectorizer='tf-idf'):
"""Initialize the dataset.
Args:
vectorizer: str text vectorization method; values = 'bow' (bag of words),
'binary-bow' (binary bag of words), 'tf-idf'
"""
self.vectorizer = vectorizer
def get(self):
"""Gets the Sentiment Labelled Sentences dataset (sparse).
Returns:
x_train: scipy.sparse.*matrix
array of features of training data
y_train: np.array
1-D array of class labels of training data
x_test: scipy.sparse.*matrix
array of features of test data
y_test: np.array
1-D array of class labels of the test data
"""
data_path = '{}/{}'.format(FILE_PATH, 'sentiment_sentences')
with tf.gfile.GFile('{}/{}'.format(data_path, 'amazon_cells_labelled.txt'),
'r') as f:
amazon_df = pd.read_csv(f, sep='\t', header=None, quoting=csv.QUOTE_NONE)
with tf.gfile.GFile('{}/{}'.format(data_path, 'imdb_labelled.txt'),
'r') as f:
imdb_df = pd.read_csv(f, sep='\t', header=None, quoting=csv.QUOTE_NONE)
with tf.gfile.GFile('{}/{}'.format(data_path, 'yelp_labelled.txt'),
'r') as f:
yelp_df = pd.read_csv(f, sep='\t', header=None, quoting=csv.QUOTE_NONE)
df = | pd.concat([amazon_df, imdb_df, yelp_df]) | pandas.concat |
import re
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays import IntervalArray
class TestSeriesReplace:
def test_replace_explicit_none(self):
# GH#36984 if the user explicitly passes value=None, give it to them
ser = pd.Series([0, 0, ""], dtype=object)
result = ser.replace("", None)
expected = pd.Series([0, 0, None], dtype=object)
tm.assert_series_equal(result, expected)
df = pd.DataFrame(np.zeros((3, 3)))
df.iloc[2, 2] = ""
result = df.replace("", None)
expected = pd.DataFrame(
{
0: np.zeros(3),
1: np.zeros(3),
2: np.array([0.0, 0.0, None], dtype=object),
}
)
assert expected.iloc[2, 2] is None
tm.assert_frame_equal(result, expected)
# GH#19998 same thing with object dtype
ser = pd.Series([10, 20, 30, "a", "a", "b", "a"])
result = ser.replace("a", None)
expected = pd.Series([10, 20, 30, None, None, "b", None])
assert expected.iloc[-1] is None
tm.assert_series_equal(result, expected)
def test_replace_noop_doesnt_downcast(self):
# GH#44498
ser = pd.Series([None, None, pd.Timestamp("2021-12-16 17:31")], dtype=object)
res = ser.replace({np.nan: None}) # should be a no-op
tm.assert_series_equal(res, ser)
assert res.dtype == object
# same thing but different calling convention
res = ser.replace(np.nan, None)
tm.assert_series_equal(res, ser)
assert res.dtype == object
def test_replace(self):
N = 100
ser = pd.Series(np.random.randn(N))
ser[0:4] = np.nan
ser[6:10] = 0
# replace list with a single value
return_value = ser.replace([np.nan], -1, inplace=True)
assert return_value is None
exp = ser.fillna(-1)
tm.assert_series_equal(ser, exp)
rs = ser.replace(0.0, np.nan)
ser[ser == 0.0] = np.nan
tm.assert_series_equal(rs, ser)
ser = pd.Series(np.fabs(np.random.randn(N)), tm.makeDateIndex(N), dtype=object)
ser[:5] = np.nan
ser[6:10] = "foo"
ser[20:30] = "bar"
# replace list with a single value
rs = ser.replace([np.nan, "foo", "bar"], -1)
assert (rs[:5] == -1).all()
assert (rs[6:10] == -1).all()
assert (rs[20:30] == -1).all()
assert (pd.isna(ser[:5])).all()
# replace with different values
rs = ser.replace({np.nan: -1, "foo": -2, "bar": -3})
assert (rs[:5] == -1).all()
assert (rs[6:10] == -2).all()
assert (rs[20:30] == -3).all()
assert (pd.isna(ser[:5])).all()
# replace with different values with 2 lists
rs2 = ser.replace([np.nan, "foo", "bar"], [-1, -2, -3])
tm.assert_series_equal(rs, rs2)
# replace inplace
return_value = ser.replace([np.nan, "foo", "bar"], -1, inplace=True)
assert return_value is None
assert (ser[:5] == -1).all()
assert (ser[6:10] == -1).all()
assert (ser[20:30] == -1).all()
def test_replace_nan_with_inf(self):
ser = pd.Series([np.nan, 0, np.inf])
tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
ser = pd.Series([np.nan, 0, "foo", "bar", np.inf, None, pd.NaT])
tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
filled = ser.copy()
filled[4] = 0
tm.assert_series_equal(ser.replace(np.inf, 0), filled)
def test_replace_listlike_value_listlike_target(self, datetime_series):
ser = pd.Series(datetime_series.index)
tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
# malformed
msg = r"Replacement lists must match in length\. Expecting 3 got 2"
with pytest.raises(ValueError, match=msg):
ser.replace([1, 2, 3], [np.nan, 0])
# ser is dt64 so can't hold 1 or 2, so this replace is a no-op
result = ser.replace([1, 2], [np.nan, 0])
tm.assert_series_equal(result, ser)
ser = pd.Series([0, 1, 2, 3, 4])
result = ser.replace([0, 1, 2, 3, 4], [4, 3, 2, 1, 0])
tm.assert_series_equal(result, pd.Series([4, 3, 2, 1, 0]))
def test_replace_gh5319(self):
# API change from 0.12?
# GH 5319
ser = pd.Series([0, np.nan, 2, 3, 4])
expected = ser.ffill()
result = ser.replace([np.nan])
tm.assert_series_equal(result, expected)
ser = pd.Series([0, np.nan, 2, 3, 4])
expected = ser.ffill()
result = ser.replace(np.nan)
tm.assert_series_equal(result, expected)
def test_replace_datetime64(self):
# GH 5797
ser = pd.Series(pd.date_range("20130101", periods=5))
expected = ser.copy()
expected.loc[2] = pd.Timestamp("20120101")
result = ser.replace({pd.Timestamp("20130103"): pd.Timestamp("20120101")})
tm.assert_series_equal(result, expected)
result = ser.replace(pd.Timestamp("20130103"), pd.Timestamp("20120101"))
tm.assert_series_equal(result, expected)
def test_replace_nat_with_tz(self):
# GH 11792: Test with replacing NaT in a list with tz data
ts = pd.Timestamp("2015/01/01", tz="UTC")
s = pd.Series([pd.NaT, pd.Timestamp("2015/01/01", tz="UTC")])
result = s.replace([np.nan, pd.NaT], pd.Timestamp.min)
expected = pd.Series([pd.Timestamp.min, ts], dtype=object)
tm.assert_series_equal(expected, result)
def test_replace_timedelta_td64(self):
tdi = pd.timedelta_range(0, periods=5)
ser = pd.Series(tdi)
# Using a single dict argument means we go through replace_list
result = ser.replace({ser[1]: ser[3]})
expected = pd.Series([ser[0], ser[3], ser[2], ser[3], ser[4]])
tm.assert_series_equal(result, expected)
def test_replace_with_single_list(self):
ser = pd.Series([0, 1, 2, 3, 4])
result = ser.replace([1, 2, 3])
tm.assert_series_equal(result, pd.Series([0, 0, 0, 0, 4]))
s = ser.copy()
return_value = s.replace([1, 2, 3], inplace=True)
assert return_value is None
tm.assert_series_equal(s, | pd.Series([0, 0, 0, 0, 4]) | pandas.Series |
from django.http.response import HttpResponse
from django.shortcuts import get_object_or_404, render, redirect
from django.http import HttpResponseRedirect, JsonResponse
from django.urls import reverse
from django.views import generic
from django.utils import timezone
from django.views.generic import ListView, CreateView, UpdateView
from django.urls import reverse_lazy
from .models import *
from .forms import CreateUserForm
from django.contrib.auth.forms import UserCreationForm
from distutils.util import strtobool
from django.contrib.auth.models import User
from django.contrib import messages
import json
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
import pandas as pd
from verify_email.email_handler import send_verification_email # pip install Django-Verify-Email
from django.core import mail
from django.template.loader import render_to_string
from django.utils.html import strip_tags
from django.contrib.auth import get_user_model
from core.functions import DistanceH, mainFunction
def dataHome(request, user_id):
distance_df = mainFunction()
parSub = parentSubject.objects.values()
df_parsub = | pd.DataFrame(parSub) | pandas.DataFrame |
import sys
import pandas as pd
import numpy as np
from tqdm import tqdm
sys.path.append('../mss')
from mssmain import batch_scans, peak_list
path = input('Please input the mzml file path:')
noise_thres = int(input('Please input the noise threshold for ms1 spectrum:'))
score_switch = input('please define if enable the peak score(Y/N):')
if score_switch == 'Y':
model_score = True
elif score_switch == 'N':
model_score = False
else:
print('invalid selection')
model_score = False
rt_error = float(input('please define the rt error:'))
mz_error = float(input('please define the mz error:'))
export_path = input('export path:')
export_name = input('export name:')
print('Reading mzml files...')
batch_scan, file_list = batch_scans(path, True, noise_thres)
print('Processing peak list...')
d_peak = []
for i in range(len(batch_scan)):
print('Processing', str(int(i + 1)), 'out of ', len(batch_scan), 'file')
d_result = peak_list(batch_scan[i], 20, enable_score=model_score)
d_peak.append(d_result)
def stack(d_batch):
"""This function compiles all samples files into one dataframe
for analysis. It takes in files that are of the .txt type."""
all_samples = []
# Calculating the number of files in path
num_files = len(d_batch)
for i in range(num_files):
sample = d_batch[i]
sample_df = pd.DataFrame(sample, columns=['rt',
'm/z', 'sn', 'score', 'peak area'],
dtype=np.float32)
all_samples += [sample_df]
# Combining all the dataframes into one
total_samples = pd.concat(all_samples)
all_samples.clear()
# Cleaning up the data before processing
total_samples.loc[total_samples.sn == 0, 'sn'] = float('inf')
total_samples.loc[total_samples.score == 3.0, 'score'] = 0.1
total_samples.loc[total_samples.score == 2.0, 'score'] = 0.6
print("Process completed!")
return num_files, total_samples
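# Quick usage sketch for the helper above, using the per-file peak lists built
# earlier in this script:
#
#   num_files, total_samples = stack(d_peak)
#   print(num_files, total_samples.shape)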
def alignment(d_batch, export_name, rt_error, MZ_error):
"""This function works by using one .txt file as a reference in which
other files realigned to in terms of precursor and RT. """
RT_error = rt_error # units of minutes, can be adjusted
alignment_df = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
from learntools.core import *
reviews = pd.read_csv("../input/wine-reviews/winemag-data-130k-v2.csv", index_col=0)
# Load some other datasets used in this exercise
gaming_products = pd.read_csv("../input/things-on-reddit/top-things/top-things/reddits/g/gaming.csv")
gaming_products['subreddit'] = "r/gaming"
movie_products = | pd.read_csv("../input/things-on-reddit/top-things/top-things/reddits/m/movies.csv") | pandas.read_csv |
"""
Test AR Model
"""
import datetime as dt
from itertools import product
import numpy as np
from numpy.testing import assert_allclose, assert_almost_equal
import pandas as pd
from pandas import Index, Series, date_range, period_range
from pandas.testing import assert_series_equal
import pytest
from statsmodels.datasets import macrodata, sunspots
from statsmodels.iolib.summary import Summary
from statsmodels.regression.linear_model import OLS
from statsmodels.tools.sm_exceptions import SpecificationWarning, ValueWarning
from statsmodels.tools.tools import Bunch
from statsmodels.tsa.ar_model import AutoReg, ar_select_order
from statsmodels.tsa.arima_process import arma_generate_sample
from statsmodels.tsa.deterministic import (
DeterministicProcess,
Seasonality,
TimeTrend,
)
from statsmodels.tsa.statespace.sarimax import SARIMAX
from statsmodels.tsa.tests.results import results_ar
DECIMAL_6 = 6
DECIMAL_5 = 5
DECIMAL_4 = 4
def gen_ar_data(nobs):
rs = np.random.RandomState(982739)
idx = pd.date_range(dt.datetime(1900, 1, 1), freq="M", periods=nobs)
return pd.Series(rs.standard_normal(nobs), index=idx), rs
def gen_ols_regressors(ar, seasonal, trend, exog):
nobs = 500
y, rs = gen_ar_data(nobs)
maxlag = ar if isinstance(ar, int) else max(ar)
reg = []
if "c" in trend:
const = pd.Series(np.ones(nobs), index=y.index, name="const")
reg.append(const)
if "t" in trend:
time = np.arange(1, nobs + 1)
time = pd.Series(time, index=y.index, name="time")
reg.append(time)
if isinstance(ar, int) and ar:
lags = np.arange(1, ar + 1)
elif ar == 0:
lags = None
else:
lags = ar
if seasonal:
seasons = np.zeros((500, 12))
for i in range(12):
seasons[i::12, i] = 1
cols = ["s.{0}".format(i) for i in range(12)]
seasons = pd.DataFrame(seasons, columns=cols, index=y.index)
if "c" in trend:
seasons = seasons.iloc[:, 1:]
reg.append(seasons)
if maxlag:
for lag in lags:
reg.append(y.shift(lag))
if exog:
x = rs.standard_normal((nobs, exog))
cols = ["x.{0}".format(i) for i in range(exog)]
x = pd.DataFrame(x, columns=cols, index=y.index)
reg.append(x)
else:
x = None
reg.insert(0, y)
df = pd.concat(reg, axis=1).dropna()
endog = df.iloc[:, 0]
exog = df.iloc[:, 1:]
return y, x, endog, exog
ar = [0, 3, [1, 3], [3]]
seasonal = [True, False]
trend = ["n", "c", "t", "ct"]
exog = [None, 2]
covs = ["nonrobust", "HC0"]
params = list(product(ar, seasonal, trend, exog, covs))
final = []
for param in params:
if param[0] != 0 or param[1] or param[2] != "n" or param[3]:
final.append(param)
params = final
names = ("AR", "Seasonal", "Trend", "Exog", "Cov Type")
ids = [
", ".join([n + ": " + str(p) for n, p in zip(names, param)])
for param in params
]
@pytest.fixture(scope="module", params=params, ids=ids)
def ols_autoreg_result(request):
ar, seasonal, trend, exog, cov_type = request.param
y, x, endog, exog = gen_ols_regressors(ar, seasonal, trend, exog)
ar_mod = AutoReg(y, ar, seasonal=seasonal, trend=trend, exog=x)
ar_res = ar_mod.fit(cov_type=cov_type)
ols = OLS(endog, exog)
ols_res = ols.fit(cov_type=cov_type, use_t=False)
return ar_res, ols_res
attributes = [
"bse",
"cov_params",
"df_model",
"df_resid",
"fittedvalues",
"llf",
"nobs",
"params",
"resid",
"scale",
"tvalues",
"use_t",
]
def fix_ols_attribute(val, attrib, res):
"""
fixes to correct for df adjustment b/t OLS and AutoReg with nonrobust cov
"""
nparam = res.k_constant + res.df_model
nobs = nparam + res.df_resid
df_correction = (nobs - nparam) / nobs
if attrib in ("scale",):
return val * df_correction
elif attrib == "df_model":
return val + res.k_constant
elif res.cov_type != "nonrobust":
return val
elif attrib in ("bse", "conf_int"):
return val * np.sqrt(df_correction)
elif attrib in ("cov_params", "scale"):
return val * df_correction
elif attrib in ("f_test",):
return val / df_correction
elif attrib in ("tvalues",):
return val / np.sqrt(df_correction)
return val
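# A small numeric illustration of the correction applied above (numbers are
# arbitrary): with nobs = 500 observations and nparam = 5 estimated parameters,
# df_correction = (500 - 5) / 500 = 0.99, so a nonrobust OLS "scale" is scaled
# by 0.99 and OLS "bse" by sqrt(0.99) ~= 0.995 to match AutoReg, which does not
# apply the small-sample degrees-of-freedom adjustment.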
@pytest.mark.parametrize("attribute", attributes)
def test_equiv_ols_autoreg(ols_autoreg_result, attribute):
a, o = ols_autoreg_result
ols_a = getattr(o, attribute)
ar_a = getattr(a, attribute)
if callable(ols_a):
ols_a = ols_a()
ar_a = ar_a()
ols_a = fix_ols_attribute(ols_a, attribute, o)
assert_allclose(ols_a, ar_a)
def test_conf_int_ols_autoreg(ols_autoreg_result):
a, o = ols_autoreg_result
a_ci = a.conf_int()
o_ci = o.conf_int()
if o.cov_type == "nonrobust":
spread = o_ci.T - o.params
spread = fix_ols_attribute(spread, "conf_int", o)
o_ci = (spread + o.params).T
assert_allclose(a_ci, o_ci)
def test_f_test_ols_autoreg(ols_autoreg_result):
a, o = ols_autoreg_result
r = np.eye(a.params.shape[0])
a_f = a.f_test(r).fvalue
o_f = o.f_test(r).fvalue
o_f = fix_ols_attribute(o_f, "f_test", o)
assert_allclose(a_f, o_f)
@pytest.mark.smoke
def test_other_tests_autoreg(ols_autoreg_result):
a, _ = ols_autoreg_result
r = np.ones_like(a.params)
a.t_test(r)
r = np.eye(a.params.shape[0])
a.wald_test(r)
# TODO: test likelihood for ARX model?
class CheckARMixin(object):
def test_params(self):
assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_6)
def test_bse(self):
bse = np.sqrt(np.diag(self.res1.cov_params()))
# no dof correction for compatability with Stata
assert_almost_equal(bse, self.res2.bse_stata, DECIMAL_6)
assert_almost_equal(self.res1.bse, self.res2.bse_gretl, DECIMAL_5)
def test_llf(self):
assert_almost_equal(self.res1.llf, self.res2.llf, DECIMAL_6)
def test_fpe(self):
assert_almost_equal(self.res1.fpe, self.res2.fpe, DECIMAL_6)
def test_pickle(self):
from io import BytesIO
fh = BytesIO()
# test wrapped results load save pickle
self.res1.save(fh)
fh.seek(0, 0)
res_unpickled = self.res1.__class__.load(fh)
assert type(res_unpickled) is type(self.res1) # noqa: E721
@pytest.mark.smoke
def test_summary(self):
assert isinstance(self.res1.summary().as_text(), str)
@pytest.mark.smoke
def test_pvalues(self):
assert isinstance(self.res1.pvalues, (np.ndarray, pd.Series))
params = product(
[0, 1, 3, [1, 3]],
["n", "c", "t", "ct"],
[True, False],
[0, 2],
[None, 11],
["none", "drop"],
[True, False],
[None, 12],
)
params = list(params)
params = [
param
for param in params
if (param[0] or param[1] != "n" or param[2] or param[3])
]
params = [
param
for param in params
if not param[2] or (param[2] and (param[4] or param[6]))
]
param_fmt = """\
lags: {0}, trend: {1}, seasonal: {2}, nexog: {3}, periods: {4}, \
missing: {5}, pandas: {6}, hold_back: {7}"""
ids = [param_fmt.format(*param) for param in params]
def gen_data(nobs, nexog, pandas, seed=92874765):
rs = np.random.RandomState(seed)
endog = rs.standard_normal((nobs))
exog = rs.standard_normal((nobs, nexog)) if nexog else None
if pandas:
index = pd.date_range(
dt.datetime(1999, 12, 31), periods=nobs, freq="M"
)
endog = pd.Series(endog, name="endog", index=index)
if nexog:
cols = ["exog.{0}".format(i) for i in range(exog.shape[1])]
exog = pd.DataFrame(exog, columns=cols, index=index)
from collections import namedtuple
DataSet = namedtuple("DataSet", ["endog", "exog"])
return DataSet(endog=endog, exog=exog)
@pytest.fixture(scope="module", params=params, ids=ids)
def ar_data(request):
lags, trend, seasonal = request.param[:3]
nexog, period, missing, use_pandas, hold_back = request.param[3:]
data = gen_data(250, nexog, use_pandas)
return Bunch(
trend=trend,
lags=lags,
seasonal=seasonal,
period=period,
endog=data.endog,
exog=data.exog,
missing=missing,
hold_back=hold_back,
)
@pytest.fixture(scope="module")
def ar2(request):
gen = np.random.RandomState(20210623)
e = gen.standard_normal(52)
y = 10 * np.ones_like(e)
for i in range(2, y.shape[0]):
y[i] = 1 + 0.5 * y[i - 1] + 0.4 * y[i - 2] + e[i]
index = pd.period_range("2000-01-01", periods=e.shape[0] - 2, freq="M")
return pd.Series(y[2:], index=index)
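# Note on the simulated AR(2) above: with y[i] = 1 + 0.5*y[i-1] + 0.4*y[i-2] + e[i]
# the unconditional mean is 1 / (1 - 0.5 - 0.4) = 10, which is why the series is
# initialised at 10 before the recursion starts.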
params = product(
[0, 3, [1, 3]],
["c"],
[True, False],
[0],
[None, 11],
["drop"],
[True, False],
[None, 12],
)
params = list(params)
params = [
param
for param in params
if (param[0] or param[1] != "n" or param[2] or param[3])
]
params = [
param
for param in params
if not param[2] or (param[2] and (param[4] or param[6]))
]
param_fmt = """\
lags: {0}, trend: {1}, seasonal: {2}, nexog: {3}, periods: {4}, \
missing: {5}, pandas: {6}, hold_back: {7}"""
ids = [param_fmt.format(*param) for param in params]
# Only test 1/3 to save time
@pytest.fixture(scope="module", params=params[::3], ids=ids[::3])
def plot_data(request):
lags, trend, seasonal = request.param[:3]
nexog, period, missing, use_pandas, hold_back = request.param[3:]
data = gen_data(250, nexog, use_pandas)
return Bunch(
trend=trend,
lags=lags,
seasonal=seasonal,
period=period,
endog=data.endog,
exog=data.exog,
missing=missing,
hold_back=hold_back,
)
@pytest.mark.matplotlib
@pytest.mark.smoke
def test_autoreg_smoke_plots(plot_data, close_figures):
from matplotlib.figure import Figure
mod = AutoReg(
plot_data.endog,
plot_data.lags,
trend=plot_data.trend,
seasonal=plot_data.seasonal,
exog=plot_data.exog,
hold_back=plot_data.hold_back,
period=plot_data.period,
missing=plot_data.missing,
)
res = mod.fit()
fig = res.plot_diagnostics()
assert isinstance(fig, Figure)
if plot_data.exog is None:
fig = res.plot_predict(end=300)
assert isinstance(fig, Figure)
fig = res.plot_predict(end=300, alpha=None, in_sample=False)
assert isinstance(fig, Figure)
assert isinstance(res.summary(), Summary)
@pytest.mark.smoke
def test_autoreg_predict_smoke(ar_data):
mod = AutoReg(
ar_data.endog,
ar_data.lags,
trend=ar_data.trend,
seasonal=ar_data.seasonal,
exog=ar_data.exog,
hold_back=ar_data.hold_back,
period=ar_data.period,
missing=ar_data.missing,
)
res = mod.fit()
exog_oos = None
if ar_data.exog is not None:
exog_oos = np.empty((1, ar_data.exog.shape[1]))
mod.predict(res.params, 0, 250, exog_oos=exog_oos)
if ar_data.lags == 0 and ar_data.exog is None:
mod.predict(res.params, 0, 350, exog_oos=exog_oos)
if isinstance(ar_data.endog, pd.Series) and (
not ar_data.seasonal or ar_data.period is not None
):
ar_data.endog.index = list(range(ar_data.endog.shape[0]))
if ar_data.exog is not None:
ar_data.exog.index = list(range(ar_data.endog.shape[0]))
mod = AutoReg(
ar_data.endog,
ar_data.lags,
trend=ar_data.trend,
seasonal=ar_data.seasonal,
exog=ar_data.exog,
period=ar_data.period,
missing=ar_data.missing,
)
mod.predict(res.params, 0, 250, exog_oos=exog_oos)
@pytest.mark.matplotlib
def test_parameterless_autoreg():
data = gen_data(250, 0, False)
mod = AutoReg(data.endog, 0, trend="n", seasonal=False, exog=None)
res = mod.fit()
for attr in dir(res):
if attr.startswith("_"):
continue
# TODO
if attr in (
"predict",
"f_test",
"t_test",
"initialize",
"load",
"remove_data",
"save",
"t_test",
"t_test_pairwise",
"wald_test",
"wald_test_terms",
):
continue
attr = getattr(res, attr)
if callable(attr):
attr()
else:
assert isinstance(attr, object)
def test_predict_errors():
data = gen_data(250, 2, True)
mod = AutoReg(data.endog, 3)
res = mod.fit()
with pytest.raises(ValueError, match="exog and exog_oos cannot be used"):
mod.predict(res.params, exog=data.exog)
with pytest.raises(ValueError, match="exog and exog_oos cannot be used"):
mod.predict(res.params, exog_oos=data.exog)
with pytest.raises(ValueError, match="hold_back must be >= lags"):
AutoReg(data.endog, 3, hold_back=1)
with pytest.raises(ValueError, match="freq cannot be inferred"):
AutoReg(data.endog.values, 3, seasonal=True)
mod = AutoReg(data.endog, 3, exog=data.exog)
res = mod.fit()
with pytest.raises(ValueError, match=r"The shape of exog \(200, 2\)"):
mod.predict(res.params, exog=data.exog.iloc[:200])
with pytest.raises(ValueError, match="The number of columns in exog_oos"):
mod.predict(res.params, exog_oos=data.exog.iloc[:, :1])
with pytest.raises(ValueError, match="Prediction must have `end` after"):
mod.predict(res.params, start=200, end=199)
with pytest.raises(ValueError, match="exog_oos must be provided"):
mod.predict(res.params, end=250, exog_oos=None)
mod = AutoReg(data.endog, 0, exog=data.exog)
res = mod.fit()
with pytest.raises(ValueError, match="start and end indicate that 10"):
mod.predict(res.params, end=259, exog_oos=data.exog.iloc[:5])
def test_spec_errors():
data = gen_data(250, 2, True)
with pytest.raises(ValueError, match="lags must be a non-negative scalar"):
AutoReg(data.endog, -1)
with pytest.raises(ValueError, match="All values in lags must be pos"):
AutoReg(data.endog, [1, 1, 1])
with pytest.raises(ValueError, match="All values in lags must be pos"):
AutoReg(data.endog, [1, -2, 3])
@pytest.mark.smoke
def test_dynamic_forecast_smoke(ar_data):
mod = AutoReg(
ar_data.endog,
ar_data.lags,
trend=ar_data.trend,
seasonal=ar_data.seasonal,
exog=ar_data.exog,
hold_back=ar_data.hold_back,
period=ar_data.period,
missing=ar_data.missing,
)
res = mod.fit()
res.predict(dynamic=True)
if ar_data.exog is None:
res.predict(end=260, dynamic=True)
@pytest.mark.smoke
def test_ar_select_order_smoke():
data = sunspots.load().data["SUNACTIVITY"]
ar_select_order(data, 4, glob=True, trend="n")
ar_select_order(data, 4, glob=False, trend="n")
ar_select_order(data, 4, seasonal=True, period=12)
ar_select_order(data, 4, seasonal=False)
ar_select_order(data, 4, glob=True)
ar_select_order(data, 4, glob=True, seasonal=True, period=12)
class CheckAutoRegMixin(CheckARMixin):
def test_bse(self):
assert_almost_equal(self.res1.bse, self.res2.bse_stata, DECIMAL_6)
class TestAutoRegOLSConstant(CheckAutoRegMixin):
"""
Test AutoReg fit by OLS with a constant.
"""
@classmethod
def setup_class(cls):
data = sunspots.load()
data.endog.index = list(range(len(data.endog)))
cls.res1 = AutoReg(data.endog, lags=9).fit()
cls.res2 = results_ar.ARResultsOLS(constant=True)
def test_predict(self):
model = self.res1.model
params = self.res1.params
assert_almost_equal(
model.predict(params)[model.hold_back :],
self.res2.FVOLSnneg1start0,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params)[model.hold_back :],
self.res2.FVOLSnneg1start9,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params, start=100),
self.res2.FVOLSnneg1start100,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params, start=9, end=200),
self.res2.FVOLSn200start0,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params)[model.hold_back :],
self.res2.FVOLSdefault,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params, start=200, end=400),
self.res2.FVOLSn200start200,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params, start=308, end=424),
self.res2.FVOLSn100start325,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params, start=9, end=310),
self.res2.FVOLSn301start9,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params, start=308, end=316),
self.res2.FVOLSn4start312,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params, start=308, end=327),
self.res2.FVOLSn15start312,
DECIMAL_4,
)
class TestAutoRegOLSNoConstant(CheckAutoRegMixin):
"""f
Test AR fit by OLS without a constant.
"""
@classmethod
def setup_class(cls):
data = sunspots.load()
cls.res1 = AutoReg(np.asarray(data.endog), lags=9, trend="n").fit()
cls.res2 = results_ar.ARResultsOLS(constant=False)
def test_predict(self):
model = self.res1.model
params = self.res1.params
assert_almost_equal(
model.predict(params)[model.hold_back :],
self.res2.FVOLSnneg1start0,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params)[model.hold_back :],
self.res2.FVOLSnneg1start9,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params, start=100),
self.res2.FVOLSnneg1start100,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params, start=9, end=200),
self.res2.FVOLSn200start0,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params)[model.hold_back :],
self.res2.FVOLSdefault,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params, start=200, end=400),
self.res2.FVOLSn200start200,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params, start=308, end=424),
self.res2.FVOLSn100start325,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params, start=9, end=310),
self.res2.FVOLSn301start9,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params, start=308, end=316),
self.res2.FVOLSn4start312,
DECIMAL_4,
)
assert_almost_equal(
model.predict(params, start=308, end=327),
self.res2.FVOLSn15start312,
DECIMAL_4,
)
@pytest.mark.parametrize("lag", list(np.arange(1, 16 + 1)))
def test_autoreg_info_criterion(lag):
data = sunspots.load()
endog = np.asarray(data.endog)
endog_tmp = endog[16 - lag :]
r = AutoReg(endog_tmp, lags=lag).fit()
# See issue #324 for the corrections vs. R
aic = r.aic
hqic = r.hqic
bic = r.bic
res1 = np.array([aic, hqic, bic, r.fpe])
# aic correction to match R
res2 = results_ar.ARLagResults("const").ic.T
comp = res2[lag - 1, :].copy()
k = 2 + lag
pen = np.array([2, 2 * np.log(np.log(r.nobs)), np.log(r.nobs)])
comp[:3] = -2 * r.llf + pen * k
assert_almost_equal(res1, comp, DECIMAL_6)
r2 = AutoReg(endog, lags=lag, hold_back=16).fit()
assert_allclose(r.aic, r2.aic)
assert_allclose(r.bic, r2.bic)
assert_allclose(r.hqic, r2.hqic)
assert_allclose(r.fpe, r2.fpe)
@pytest.mark.parametrize("old_names", [True, False])
def test_autoreg_named_series(reset_randomstate, old_names):
warning = FutureWarning if old_names else None
dates = period_range(start="2011-1", periods=72, freq="M")
y = Series(np.random.randn(72), name="foobar", index=dates)
with pytest.warns(warning):
results = AutoReg(y, lags=2, old_names=old_names).fit()
if old_names:
idx = Index(["intercept", "foobar.L1", "foobar.L2"])
else:
idx = Index(["const", "foobar.L1", "foobar.L2"])
assert results.params.index.equals(idx)
@pytest.mark.smoke
def test_autoreg_series():
# GH#773
dta = macrodata.load_pandas().data["cpi"].diff().dropna()
dates = period_range(start="1959Q1", periods=len(dta), freq="Q")
dta.index = dates
ar = AutoReg(dta, lags=15).fit()
ar.bse
def test_ar_order_select():
# GH#2118
np.random.seed(12345)
y = arma_generate_sample([1, -0.75, 0.3], [1], 100)
ts = Series(
y,
index=date_range(start=dt.datetime(1990, 1, 1), periods=100, freq="M"),
)
res = ar_select_order(ts, maxlag=12, ic="aic")
assert tuple(res.ar_lags) == (1, 2)
assert isinstance(res.aic, dict)
assert isinstance(res.bic, dict)
assert isinstance(res.hqic, dict)
assert isinstance(res.model, AutoReg)
assert not res.seasonal
assert res.trend == "c"
assert res.period is None
def test_autoreg_constant_column_trend():
sample = np.array(
[
0.46341460943222046,
0.46341460943222046,
0.39024388790130615,
0.4146341383457184,
0.4146341383457184,
0.4146341383457184,
0.3414634168148041,
0.4390243887901306,
0.46341460943222046,
0.4390243887901306,
]
)
with pytest.raises(ValueError, match="The model specification cannot"):
AutoReg(sample, lags=7)
with pytest.raises(ValueError, match="The model specification cannot"):
AutoReg(sample, lags=7, trend="n")
@pytest.mark.parametrize("old_names", [True, False])
def test_autoreg_summary_corner(old_names):
data = macrodata.load_pandas().data["cpi"].diff().dropna()
dates = period_range(start="1959Q1", periods=len(data), freq="Q")
data.index = dates
warning = FutureWarning if old_names else None
with pytest.warns(warning):
res = AutoReg(data, lags=4, old_names=old_names).fit()
summ = res.summary().as_text()
assert "AutoReg(4)" in summ
assert "cpi.L4" in summ
assert "03-31-1960" in summ
with pytest.warns(warning):
res = AutoReg(data, lags=0, old_names=old_names).fit()
summ = res.summary().as_text()
if old_names:
assert "intercept" in summ
else:
assert "const" in summ
assert "AutoReg(0)" in summ
@pytest.mark.smoke
def test_autoreg_score():
data = sunspots.load_pandas()
ar = AutoReg(np.asarray(data.endog), 3)
res = ar.fit()
score = ar.score(res.params)
assert isinstance(score, np.ndarray)
assert score.shape == (4,)
assert ar.information(res.params).shape == (4, 4)
assert_allclose(-ar.hessian(res.params), ar.information(res.params))
def test_autoreg_roots():
data = sunspots.load_pandas()
ar = AutoReg(np.asarray(data.endog), lags=1)
res = ar.fit()
assert_almost_equal(res.roots, np.array([1.0 / res.params[-1]]))
def test_equiv_dynamic(reset_randomstate):
e = np.random.standard_normal(1001)
y = np.empty(1001)
y[0] = e[0] * np.sqrt(1.0 / (1 - 0.9 ** 2))
for i in range(1, 1001):
y[i] = 0.9 * y[i - 1] + e[i]
mod = AutoReg(y, 1)
res = mod.fit()
pred0 = res.predict(500, 800, dynamic=0)
pred1 = res.predict(500, 800, dynamic=True)
idx = pd.date_range(dt.datetime(2000, 1, 30), periods=1001, freq="M")
y = pd.Series(y, index=idx)
mod = AutoReg(y, 1)
res = mod.fit()
pred2 = res.predict(idx[500], idx[800], dynamic=idx[500])
pred3 = res.predict(idx[500], idx[800], dynamic=0)
pred4 = res.predict(idx[500], idx[800], dynamic=True)
assert_allclose(pred0, pred1)
assert_allclose(pred0, pred2)
assert_allclose(pred0, pred3)
assert_allclose(pred0, pred4)
def test_dynamic_against_sarimax():
rs = np.random.RandomState(12345678)
e = rs.standard_normal(1001)
y = np.empty(1001)
y[0] = e[0] * np.sqrt(1.0 / (1 - 0.9 ** 2))
for i in range(1, 1001):
y[i] = 0.9 * y[i - 1] + e[i]
smod = SARIMAX(y, order=(1, 0, 0), trend="c")
sres = smod.fit(disp=False)
mod = AutoReg(y, 1)
spred = sres.predict(900, 1100)
pred = mod.predict(sres.params[:2], 900, 1100)
assert_allclose(spred, pred)
spred = sres.predict(900, 1100, dynamic=True)
pred = mod.predict(sres.params[:2], 900, 1100, dynamic=True)
assert_allclose(spred, pred)
spred = sres.predict(900, 1100, dynamic=50)
pred = mod.predict(sres.params[:2], 900, 1100, dynamic=50)
assert_allclose(spred, pred)
def test_predict_seasonal():
rs = np.random.RandomState(12345678)
e = rs.standard_normal(1001)
y = np.empty(1001)
y[0] = e[0] * np.sqrt(1.0 / (1 - 0.9 ** 2))
effects = 10 * np.cos(np.arange(12) / 11 * 2 * np.pi)
for i in range(1, 1001):
y[i] = 10 + 0.9 * y[i - 1] + e[i] + effects[i % 12]
ys = pd.Series(
y, index=pd.date_range(dt.datetime(1950, 1, 1), periods=1001, freq="M")
)
mod = AutoReg(ys, 1, seasonal=True)
res = mod.fit()
c = res.params.iloc[0]
seasons = np.zeros(12)
seasons[1:] = res.params.iloc[1:-1]
ar = res.params.iloc[-1]
pred = res.predict(900, 1100, True)
direct = np.zeros(201)
direct[0] = y[899] * ar + c + seasons[900 % 12]
for i in range(1, 201):
direct[i] = direct[i - 1] * ar + c + seasons[(900 + i) % 12]
direct = pd.Series(
direct, index=pd.date_range(ys.index[900], periods=201, freq="M")
)
assert_series_equal(pred, direct)
pred = res.predict(900, dynamic=False)
direct = y[899:-1] * ar + c + seasons[np.arange(900, 1001) % 12]
direct = pd.Series(
direct, index=pd.date_range(ys.index[900], periods=101, freq="M")
)
assert_series_equal(pred, direct)
def test_predict_exog():
rs = np.random.RandomState(12345678)
e = rs.standard_normal(1001)
y = np.empty(1001)
x = rs.standard_normal((1001, 2))
y[:3] = e[:3] * np.sqrt(1.0 / (1 - 0.9 ** 2)) + x[:3].sum(1)
for i in range(3, 1001):
y[i] = 10 + 0.9 * y[i - 1] - 0.5 * y[i - 3] + e[i] + x[i].sum()
ys = pd.Series(
y, index=pd.date_range(dt.datetime(1950, 1, 1), periods=1001, freq="M")
)
xdf = pd.DataFrame(x, columns=["x0", "x1"], index=ys.index)
mod = AutoReg(ys, [1, 3], trend="c", exog=xdf)
res = mod.fit()
assert "-X" in str(res.summary())
pred = res.predict(900)
c = res.params.iloc[0]
ar = res.params.iloc[1:3]
ex = np.asarray(res.params.iloc[3:])
direct = c + ar[0] * y[899:-1] + ar[1] * y[897:-3]
direct += ex[0] * x[900:, 0] + ex[1] * x[900:, 1]
idx = pd.date_range(ys.index[900], periods=101, freq="M")
direct = | pd.Series(direct, index=idx) | pandas.Series |
"""
Unit test of Inverse Transform
"""
import unittest
import pandas as pd
import numpy as np
import category_encoders as ce
from sklearn.compose import ColumnTransformer
import sklearn.preprocessing as skp
import catboost as cb
import sklearn
import lightgbm
import xgboost
from shapash.utils.transform import inverse_transform, apply_preprocessing
from shapash.utils.columntransformer_backend import get_feature_names, get_names, get_list_features_names
# TODO
# StandardScaler return object vs float vs int
# Target encoding return object vs float
class TestInverseTransformColumnsTransformer(unittest.TestCase):
def test_inv_transform_ct_1(self):
"""
test inv_transform_ct with multiple encoding and drop option
"""
train = pd.DataFrame({'city': ['chicago', 'paris'],
'state': ['US', 'FR'],
'other': ['A', 'B']},
index=['index1', 'index2'])
enc = ColumnTransformer(
transformers=[
('onehot_ce', ce.OneHotEncoder(), ['city', 'state']),
('onehot_skp', skp.OneHotEncoder(), ['city', 'state'])
],
remainder='drop')
enc.fit(train)
test = pd.DataFrame({'city': ['chicago', 'chicago', 'paris'],
'state': ['US', 'FR', 'FR'],
'other': ['A', 'B', 'C']},
index=['index1', 'index2', 'index3'])
expected = pd.DataFrame({'onehot_ce_city': ['chicago', 'chicago', 'paris'],
'onehot_ce_state': ['US', 'FR', 'FR'],
'onehot_skp_city': ['chicago', 'chicago', 'paris'],
'onehot_skp_state': ['US', 'FR', 'FR']},
index=['index1', 'index2', 'index3'])
result = pd.DataFrame(enc.transform(test))
result.columns = ['col1_0', 'col1_1', 'col2_0', 'col2_1', 'col3_0', 'col3_1', 'col4_0', 'col4_1']
result.index = ['index1', 'index2', 'index3']
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(original, expected)
def test_inv_transform_ct_2(self):
"""
test inv_transform_ct with multiple encoding and passthrough option
"""
train = pd.DataFrame({'city': ['chicago', 'paris'],
'state': ['US', 'FR'],
'other': ['A', 'B']},
index=['index1', 'index2'])
enc = ColumnTransformer(
transformers=[
('onehot_ce', ce.OneHotEncoder(), ['city', 'state']),
('onehot_skp', skp.OneHotEncoder(), ['city', 'state'])
],
remainder='passthrough')
enc.fit(train)
test = pd.DataFrame({'city': ['chicago', 'chicago', 'paris'],
'state': ['US', 'FR', 'FR'],
'other': ['A', 'B', 'C']},
index=['index1', 'index2', 'index3'])
expected = pd.DataFrame({'onehot_ce_city': ['chicago', 'chicago', 'paris'],
'onehot_ce_state': ['US', 'FR', 'FR'],
'onehot_skp_city': ['chicago', 'chicago', 'paris'],
'onehot_skp_state': ['US', 'FR', 'FR'],
'other': ['A', 'B', 'C']},
index=['index1', 'index2', 'index3'])
result = pd.DataFrame(enc.transform(test))
result.columns = ['col1_0', 'col1_1', 'col2_0', 'col2_1', 'col3_0', 'col3_1', 'col4_0', 'col4_1', 'other']
result.index = ['index1', 'index2', 'index3']
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(original, expected)
def test_inv_transform_ct_3(self):
"""
        test inv_transform_ct with multiple encoding and dictionary
"""
train = pd.DataFrame({'city': ['chicago', 'paris'],
'state': ['US', 'FR'],
'other': ['A', 'B']},
index=['index1', 'index2'])
enc = ColumnTransformer(
transformers=[
('onehot_ce', ce.OneHotEncoder(), ['city', 'state']),
('onehot_skp', skp.OneHotEncoder(), ['city', 'state'])
],
remainder='passthrough')
enc.fit(train)
test = pd.DataFrame({'city': ['chicago', 'chicago', 'paris'],
'state': ['US', 'FR', 'FR'],
'other': ['A', 'B', 'C']},
index=['index1', 'index2', 'index3'])
expected = pd.DataFrame({'onehot_ce_city': ['CH', 'CH', 'PR'],
'onehot_ce_state': ['US-FR', 'US-FR', 'US-FR'],
'onehot_skp_city': ['chicago', 'chicago', 'paris'],
'onehot_skp_state': ['US', 'FR', 'FR'],
'other': ['A-B', 'A-B', 'C']},
index=['index1', 'index2', 'index3'])
result = pd.DataFrame(enc.transform(test))
result.columns = ['col1_0', 'col1_1', 'col2_0', 'col2_1', 'col3_0', 'col3_1', 'col4_0', 'col4_1', 'other']
result.index = ['index1', 'index2', 'index3']
input_dict1 = dict()
input_dict1['col'] = 'onehot_ce_city'
input_dict1['mapping'] = pd.Series(data=['chicago', 'paris'], index=['CH', 'PR'])
input_dict1['data_type'] = 'object'
input_dict2 = dict()
input_dict2['col'] = 'other'
input_dict2['mapping'] = pd.Series(data=['A', 'B', 'C'], index=['A-B', 'A-B', 'C'])
input_dict2['data_type'] = 'object'
input_dict3 = dict()
input_dict3['col'] = 'onehot_ce_state'
input_dict3['mapping'] = pd.Series(data=['US', 'FR'], index=['US-FR', 'US-FR'])
input_dict3['data_type'] = 'object'
list_dict = [input_dict2, input_dict3]
original = inverse_transform(result, [enc,input_dict1,list_dict])
pd.testing.assert_frame_equal(original, expected)
def test_inv_transform_ct_4(self):
"""
test inv_transform_ct with single target category encoders and passthrough option
"""
y = pd.DataFrame(data=[0, 1, 1, 1], columns=['y'])
train = pd.DataFrame({'city': ['chicago', 'paris', 'paris', 'chicago'],
'state': ['US', 'FR', 'FR', 'US'],
'other': ['A', 'B', 'B', 'B']})
enc = ColumnTransformer(
transformers=[
('target', ce.TargetEncoder(), ['city', 'state'])
],
remainder='passthrough')
test = pd.DataFrame({'city': ['chicago', 'chicago', 'paris'],
'state': ['US', 'FR', 'FR'],
'other': ['A', 'B', 'C']})
expected = pd.DataFrame(data={'target_city': ['chicago', 'chicago', 'paris'],
'target_state': ['US', 'FR', 'FR'],
'other': ['A', 'B', 'C']},
dtype=object)
enc.fit(train, y)
result = pd.DataFrame(enc.transform(test))
result.columns = ['col1', 'col2', 'other']
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(original, expected)
def test_inv_transform_ct_5(self):
"""
test inv_transform_ct with single target category encoders and drop option
"""
y = pd.DataFrame(data=[0, 1, 0, 0], columns=['y'])
train = pd.DataFrame({'city': ['chicago', 'paris', 'chicago', 'paris'],
'state': ['US', 'FR', 'US', 'FR'],
'other': ['A', 'B', 'A', 'B']})
enc = ColumnTransformer(
transformers=[
('target', ce.TargetEncoder(), ['city', 'state'])
],
remainder='drop')
enc.fit(train, y)
test = pd.DataFrame({'city': ['chicago', 'chicago', 'paris'],
'state': ['US', 'FR', 'FR'],
'other': ['A', 'B', 'C']})
expected = pd.DataFrame(data={
'target_city': ['chicago', 'chicago', 'paris'],
'target_state': ['US', 'FR', 'FR']})
result = pd.DataFrame(enc.transform(test))
result.columns = ['col1', 'col2']
original = inverse_transform(result, enc)
| pd.testing.assert_frame_equal(original, expected) | pandas.testing.assert_frame_equal |
import numpy as np  # numerical computation
import pandas as pd  # DataFrame
import matplotlib.pyplot as plt  # plotting
#import seaborn as sns  # plot style settings
from tqdm import tqdm  # progress bar for loops
import codecs  # for reading Shift-JIS encoded files
#from fbprophet import Prophet  # prophet
import pandas
#sns.set_style(style='ticks')  # set the plot style
years = range(1888, 2018, 10)
read_path = 'data/'
all_data = pd.DataFrame()
data1=pandas.read_csv(read_path+"data_1872.csv",encoding='Shift-JIS')
print(data1)
with codecs.open(read_path+"data_1878.csv", "r",
"Shift-JIS", "ignore") as f:
data2 = pd.read_table(
f, delimiter=",", skiprows=5, index_col=0,
usecols=[0, 1, 4])
#data2.index = pd.to_datetime(data2.index)
data2.index = pd.RangeIndex(start=0, stop=len(data2), step=1)
print(data2)
data_ave = data2
data_ave.columns = ['temp', 'rain']
plt.figure(num=None, figsize=(30, 15), dpi=60)
print('Plotting Results')
plt.subplot(2, 1, 1)
plt.plot(data_ave['temp'])
plt.subplot(2, 1, 2)
plt.plot(data_ave['rain'])
plt.pause(3)
plt.savefig('plot_epoch_{0:03d}_temp.png'.format(1878), dpi=60)
plt.close()
for year in tqdm(years):
data_file = 'data_{}.csv'.format(year)
    # Note 1: a plain pd.read_csv cannot read these files directly
with codecs.open(read_path+data_file, "r",
"Shift-JIS", "ignore") as f:
        # Note 2: the first few header rows are skipped,
        # and unneeded columns are excluded.
data = pd.read_table(
f, delimiter=",", skiprows=5, index_col=0,
usecols=[0, 1, 4])
data.index = pd.RangeIndex(start=0, stop=len(data), step=1)
data = data.dropna(how='all')
data.columns = ['temp', 'rain']
plt.figure(num=None, figsize=(30, 15), dpi=60)
print('Plotting Results')
plt.subplot(2, 1, 1)
plt.plot(data['temp'])
plt.subplot(2, 1, 2)
plt.plot(data['rain'])
plt.pause(3)
plt.savefig('plot_epoch_{0:03d}_temp.png'.format(year), dpi=60)
plt.close()
    # convert the index to datetime format
#data.index = pd.to_datetime(data.index)
#data.index = pd.RangeIndex(start=0, stop=len(data), step=1)
    # update all_data
all_data = pd.concat([all_data, data])
data_ave= data_ave + data
data_ave.columns = ['temp', 'rain']
data_ave.index = pd.RangeIndex(start=0, stop=len(data_ave), step=1)
data_ave = data_ave.dropna(how='all')
##data.columns = ['temp', 'rain']
plt.figure(num=None, figsize=(30, 15), dpi=60)
print('Plotting Results')
plt.subplot(2, 1, 1)
plt.plot(data_ave['temp'])
plt.subplot(2, 1, 2)
plt.plot(data_ave['rain'])
plt.pause(3)
plt.savefig('plot_epoch_ave{0:03d}_temp.png'.format(year), dpi=60)
plt.close()
# rename the columns
#data.columns = ['temp', 'rain']
#plt.savefig('plot_epoch_{}_temp.png'.format("1878-2018"), dpi=60)
#plt.close()
all_data.index = pd.RangeIndex(start=0, stop=len(all_data), step=1)
# Note 3: drop rows that are entirely NaN
all_data = all_data.dropna(how='all')
print(data)
plt.figure(num=None, figsize=(30, 15), dpi=60)
print('Plotting Results')
plt.subplot(2, 1, 1);
plt.plot(data_ave["temp"][:365]) #plt.ylim(-120, 120)
#plt.ylim(-5, 45)
#plt.xlim(0, 2000)
plt.subplot(2, 1, 2)
plt.plot(data_ave["rain"][:365])
#plt.ylim(-120, 120)
#plt.ylim(0, 100)
#plt.xlim(0, 2000)
plt.pause(3)
plt.savefig('plot_epoch_{}_temp.png'.format("data_ave365"), dpi=60)
plt.close()
plt.figure(num=None, figsize=(30, 15), dpi=60)
print('Plotting Results')
plt.subplot(2, 1, 1);
plt.plot(data_ave["temp"][366:730]) #plt.ylim(-120, 120)
#plt.ylim(-5, 45)
#plt.xlim(0, 2000)
plt.subplot(2, 1, 2)
plt.plot(data_ave["rain"][366:730])
#plt.ylim(-120, 120)
#plt.ylim(0, 100)
#plt.xlim(0, 2000)
plt.pause(3)
plt.savefig('plot_epoch_{}_temp.png'.format("data_ave730"), dpi=60)
plt.close()
data_ave_mon=pd.DataFrame()
data_ave_mon1=pd.DataFrame()
data_ave_mon1=data_ave[:365]/14
data_ave_mon2=pd.DataFrame()
data_ave_mon2=data_ave[366:730]/14
data_ave_mon3=pd.DataFrame()
data_ave_mon3=data_ave[731:1095]/14
data_ave_mon4=pd.DataFrame()
data_ave_mon4=data_ave[1096:1460]/14
data_ave_mon5=pd.DataFrame()
data_ave_mon5=data_ave[1461:1825]/14
data_ave_mon6= | pd.DataFrame() | pandas.DataFrame |
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from sklearn.feature_selection import f_classif
from sklearn.feature_selection import mutual_info_classif
from sklearn.feature_selection import mutual_info_regression
from sklearn.feature_selection import f_regression
from sklearn.feature_selection import r_regression
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
class UnivariateFeatureSelection:
def __init__(self, X, y, type="regression"):
self.X = X
self.y = y
self.type = type
# classification
def chi2__fit(self):
K = SelectKBest(chi2, k="all")
K.fit(self.X, self.y)
self.chi2_score = K.scores_
self.chi2_pvalues = K.pvalues_
self.chi2_selected_features = K.get_support()
self.chi2_selected_features_names = self.X.columns[self.chi2_selected_features]
return self
def f_classif__fit(self):
K = SelectKBest(f_classif, k="all")
K.fit(self.X, self.y)
self.f_classif_score = K.scores_
self.f_classif_pvalues = K.pvalues_
self.f_classif_selected_features = K.get_support()
self.f_classif_selected_features_names = self.X.columns[self.f_classif_selected_features]
return self
def mutual_info_classif__fit(self):
K = SelectKBest(mutual_info_classif, k="all")
K.fit(self.X, self.y)
self.mutual_info_classif_score = K.scores_
self.mutual_info_classif_pvalues = K.pvalues_
self.mutual_info_classif_selected_features = K.get_support()
self.mutual_info_classif_selected_features_names = self.X.columns[
self.mutual_info_classif_selected_features]
return self
# regression
def mutual_info_regression__fit(self):
K = SelectKBest(mutual_info_regression, k="all")
K.fit(self.X, self.y)
self.mutual_info_regression_score = K.scores_
self.mutual_info_regression_pvalues = K.pvalues_
self.mutual_info_regression_selected_features = K.get_support()
self.mutual_info_regression_selected_features_names = self.X.columns[
self.mutual_info_regression_selected_features]
return self
def f_regression__fit(self):
K = SelectKBest(f_regression, k="all")
K.fit(self.X, self.y)
self.f_regression_score = K.scores_
self.f_regression_pvalues = K.pvalues_
self.f_regression_selected_features = K.get_support()
self.f_regression_selected_features_names = self.X.columns[
self.f_regression_selected_features]
return self
def r_regression__fit(self):
K = SelectKBest(r_regression, k="all")
K.fit(self.X, self.y)
self.r_regression_score = K.scores_
self.r_regression_pvalues = K.pvalues_
self.r_regression_selected_features = K.get_support()
self.r_regression_selected_features_names = self.X.columns[
self.r_regression_selected_features]
return self
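    # Illustrative usage sketch (not part of the original class; assumes a
    # pandas DataFrame X, non-negative for chi2, and a target vector y):
    # ufs = UnivariateFeatureSelection(X, y, type="classification")
    # ufs.get_selected_features()
    # ufs.chi2_df.sort_values("Chi Squared Score", ascending=False)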
def get_selected_features(self):
if self.type == "classification":
self.chi2__fit()
self.chi2_df = pd.concat([pd.DataFrame(
self.chi2_selected_features_names), pd.DataFrame(self.chi2_score)], axis=1)
self.chi2_df.columns = ["Features", "Chi Squared Score"]
self.f_classif__fit()
self.f_classif_df = pd.concat([pd.DataFrame(
self.f_classif_selected_features_names), pd.DataFrame(self.f_classif_score)], axis=1)
self.f_classif_df.columns = ["Features", "F Score"]
self.mutual_info_classif__fit()
self.mutual_info_classif_df = pd.concat([pd.DataFrame(
self.mutual_info_classif_selected_features_names), pd.DataFrame(self.mutual_info_classif_score)], axis=1)
self.mutual_info_classif_df.columns = [
"Features", "Mutual Information Score"]
elif self.type == "regression":
self.f_regression__fit()
self.f_regression_df = pd.concat([pd.DataFrame(
self.f_regression_selected_features_names), pd.DataFrame(self.f_regression_score)], axis=1)
self.f_regression_df.columns = ["Features", "F Score"]
self.r_regression__fit()
self.r_regression_df = pd.concat([pd.DataFrame(
self.r_regression_selected_features_names), | pd.DataFrame(self.r_regression_score) | pandas.DataFrame |
"""
Module containing general-use functions
@author: <NAME>
"""
# #############################################################################
# ################################# IMPORTS ###################################
# #############################################################################
import numpy as np
import openpyxl
import pandas as pd
import itertools
import scipy.linalg as LA
# import os
import pathlib
import datetime
from scipy import stats as SS
from typing import Tuple, Dict, Union
# #############################################################################
# ########################## FILE MANAGEMENT/READING ########################
# #############################################################################
def create_path(path):
now = datetime.datetime.now()
now_dir = now.strftime("%Y%m%d_%H%M%S/")
new_path = path / now_dir
new_path.mkdir()
# if not os.path.exists(new_path):
# os.makedirs(new_path)
return new_path
def get_str_array_from_h5(h5_data, dat, dtype='U'):
dat = np.array(dat).flatten()
for i in range(len(dat)):
my_str = np.array(h5_data[dat[i]]).flatten()
my_str = ''.join([chr(c) for c in my_str])
dat[i] = my_str
dat = dat.astype(dtype)
return dat
def get_sheet(file_name, sheet_name=''):
wb = openpyxl.load_workbook(file_name)
if sheet_name == '':
print('sheet names:', wb.sheetnames)
sheet_name = wb.sheetnames[0]
return wb[sheet_name]
def get_data(sheet, i_begin, i_n, j_begin, j_n): # first rows, then columns
output = np.zeros((i_n, j_n))
for i in range(i_n):
for j in range(j_n):
output[i, j] = sheet.cell(row=i+i_begin, column=j+j_begin).value
return output
def get_labels_clmn(sheet, i_begin, i_n, j):
output = []
for i in range(i_n):
output.append(sheet.cell(row=i+i_begin, column=j).value)
return output
def get_labels_row(sheet, i_begin, i_n, j):
output = []
for i in range(i_n):
output.append(sheet.cell(row=j, column=i+i_begin).value)
return output
# #############################################################################
# ########################## STRING FUNCTIONS ###############################
# #############################################################################
def get_strs_in_cats(str_list, cats, compl=False):
"""
str_list is the general list of strings, usually the list of all cell names
cats is a selection of strings, it is usually a list
for any x in str_list, if x is part of any y (y is an element of cats)
then x is chosen. Example of str_list: all the cells in the experiment
example of cats: ['Broad T', 'Keystone L'], then all the cells which have
a substring 'Broad T' or the substring 'Keystone L' will be chosen.
An other way of putting it: choosing all the cells that are part
of any of the categories in cats
the option compl gives the complementary set of names
"""
if not compl:
return [name for name in str_list if any(x in name for x in cats)]
else:
return [name for name in str_list if not any(x in name for x in cats)]
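# Illustrative example (not from the original module), showing the substring
# matching described in the docstring above:
# >>> cells = ['Broad T1', 'Broad T2', 'Keystone L', 'Picky 0']
# >>> get_strs_in_cats(cells, ['Broad T', 'Keystone L'])
# ['Broad T1', 'Broad T2', 'Keystone L']
# >>> get_strs_in_cats(cells, ['Broad T', 'Keystone L'], compl=True)
# ['Picky 0']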
def get_cat(cell_name, cats):
"""
return the category to which the cell_name belongs to
"""
for cat in cats:
if cat in cell_name:
return cat
    raise ValueError(f'{cell_name} seems to not belong to any category {cats}')
def replace_substr(string: str, dict_str: Dict[str, str]):
for key, val in dict_str.items():
string = string.replace(key, val)
return string
def replace_substr_np(array: Union[list, np.ndarray],
dict_str: Dict[str, str]) -> Union[list, np.ndarray]:
for i in range(len(array)):
array[i] = replace_substr(array[i], dict_str)
return array
def is_str_included(A: Union[list, np.ndarray], B: Union[list, np.ndarray])\
-> bool:
"""
Checks if every string in the list A is a substring of B
"""
return [x for x in A if any(x in y for y in B)] == A
def repl_add(name: str, old: str, new: str):
"""
if the string old is in the string name, it is removed, and the string new
is added at the end of the string name
the updated string is returned
"""
if old in name:
name = name.replace(old, '')
name = name + new
return name
def repl_preadd(name: str, old: str, new: str):
"""
if the string old is in the string name, it is removed, and the string new
    is added at the beginning of the string name
the updated string is returned
"""
if old in name:
name = name.replace(old, '')
name = new + name
return name
# =============================================================================
# def get_matching_names(sub_list, full_list):
# # take a name in the list with the shorter names
# short_str = sub_list[0]
# # find in the list with longer names the equivalent name, just longer
# long_str = list(filter(lambda x: short_str in x, full_list))[0]
# # find what is missing
# suffix = long_str[len(short_str):]
# # add it to the initial list of names
# chosen_list = [name + suffix for name in sub_list]
#
# check = np.array([name in full_list for name in chosen_list])
# if np.sum(check-1) != 0:
# print('we have a problem in get_matching_names')
#
# return chosen_list
# =============================================================================
# this version should be more stable. The one above should be removed
def get_match_names(sub_list, full_list):
chosen_list = []
# take a name in the list with the shorter names
for short_str in sub_list:
# find in the list with longer names the equivalent name, just longer
long_str = list(filter(lambda x: short_str in x, full_list))[0]
# add it to the initial list of names
chosen_list.append(long_str)
check = np.array([name in full_list for name in chosen_list])
if np.sum(check-1) != 0:
print('we have a problem in get_match_names')
return chosen_list
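# Illustrative example (hedged sketch, not from the original module): each
# short name is matched to the first full name that contains it.
# >>> get_match_names(['Broad T1', 'Picky 0'],
# ...                 ['Broad T1 L', 'Keystone L', 'Picky 0 R'])
# ['Broad T1 L', 'Picky 0 R']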
def fullprint(data):
opt = np.get_printoptions()
np.set_printoptions(threshold=np.inf)
np.set_printoptions(suppress=True)
np.set_printoptions(linewidth=np.nan)
print(data)
np.set_printoptions(**opt)
def have_same_index(A: pd.DataFrame, B: pd.DataFrame) -> bool:
return set(A.index) == set(B.index)
def align_indices(A: pd.DataFrame, B: pd.DataFrame)\
-> Tuple[pd.DataFrame, pd.DataFrame]:
"""
    Makes the two dataframes have the same index order. It also covers the
    case where the indices are not exactly the same (we work with strings
    here) but one name is a shorter version of the other; in that case the
    strings become identical in both dataframes, taking the form used in the
    dataframe with the longer strings.
    The alignment follows the first dataset A, so A is not changed and B is
    sorted accordingly.
    A and B must be DataFrames; strings may become longer.
"""
if len(A) != len(B):
raise ValueError(f"datasets don't have the same index length: "
+ f"{A.shape} {B.shape}")
if set(A.index) == set(B.index):
pass
elif is_str_included(list(A.index), list(B.index)):
l1 = get_match_names(list(A.index), list(B.index))
dict_cells = dict(zip(list(A.index), l1))
A = A.rename(index=dict_cells)
elif is_str_included(list(B.index), list(A.index)):
l1 = get_match_names(list(B.index), list(A.index))
dict_cells = dict(zip(list(B.index), l1))
B = B.rename(index=dict_cells)
else:
raise ValueError('Datasets do not have same indices')
# this is just a double check
if set(A.index) == set(B.index):
srt = A.index
B = B.loc[srt]
else:
raise ValueError('Datasets do not have same indices')
return A, B
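# Minimal sketch (assumed data, not from the original module): B is reordered,
# and if needed renamed, so that its rows line up with A.
# >>> A = pd.DataFrame({'x': [1, 2]}, index=['cell A', 'cell B'])
# >>> B = pd.DataFrame({'y': [20, 10]}, index=['cell B', 'cell A'])
# >>> _, B2 = align_indices(A, B)
# >>> list(B2.index)
# ['cell A', 'cell B']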
def get_change_idx(df: pd.DataFrame, level):
x = list(df.index.get_level_values(level))
x_change = np.where(np.diff(x) != 0)[0] + 1
return x_change
# #############################################################################
# ########################## CALCULUS FUNCTIONS #############################
# #############################################################################
def add_point1(X: np.ndarray) -> np.ndarray:
"""
for 1d arrays, adds a point at the end of the array, which a duplicate
"""
return np.append(X, X[-1])
def add_point2(X: np.ndarray) -> np.ndarray:
"""
for 2d arrays
"""
# here concatenate can be replaced by append without change of results
X1 = np.array([X[:, -1]]).T
return np.concatenate((X, X1), axis=1)
def add_point3(X: np.ndarray) -> np.ndarray:
"""
for 3d arrays
"""
X1 = np.rollaxis(np.array([X[:, :, -1]]), 0, 3)
return np.concatenate((X, X1), axis=2)
def rectify(data: Union[np.ndarray, pd.DataFrame])\
-> Union[np.ndarray, pd.DataFrame]:
# return data * (np.sign(data)+1.)/2.
return np.maximum(0, data) # probably faster...
def subtr_diag(A, doit: bool = True):
'''
just removing the diagonal of a square matrix
'''
if doit:
return A - np.diag(np.diag(A))
else:
return A
def shift_pos(data):
return data - np.min(data)
def func_hill(x, y_max, EC_50, n):
return y_max * x**n/(x**n + EC_50**n)
def get_ctr_norm(X, opt=0):
"""
    for dataframes, each column is centered and normalized
    for series, the series is centered and normalized
"""
if X.ndim == 1:
return get_ctr_norm1(X, opt)
elif X.ndim == 2:
return get_ctr_norm2(X, opt)
else:
raise TypeError('there is something wrong with the dataset')
def get_ctr_norm1(X, opt=0):
"""
center and normalization for series
options: 0, 1, 2
0: returns cn
1: returns (c, cn)
2: returns (c, n, cn)
"""
X_c = X - X.mean()
X_cn = X_c / LA.norm(X_c)
if opt == 0:
return X_cn
elif opt == 1:
return X_c, X_cn
elif opt == 2:
X_n = X / LA.norm(X)
return X_c, X_n, X_cn
else:
raise ValueError('there is no option ' + str(opt))
def get_ctr_norm2(X, opt=0):
"""
each column is centered and normalized
opt can be 0, 1, 2
0: returns cn
1: returns (c, cn)
2: returns (c, n, cn)
"""
X_c = X.subtract(X.mean(axis=0))
X_cn = X_c.divide(LA.norm(X_c, axis=0), axis=1)
if opt == 0:
return X_cn
elif opt == 1:
return X_c, X_cn
elif opt == 2:
X_n = X.divide(LA.norm(X, axis=0), axis=1)
return X_c, X_n, X_cn
else:
raise ValueError('there is no option ' + str(opt))
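# Illustrative check (assumed data, not from the original module): after
# get_ctr_norm each column has zero mean and unit Euclidean norm.
# >>> df = pd.DataFrame({'a': [1.0, 3.0], 'b': [0.0, 2.0]})
# >>> cn = get_ctr_norm(df)
# >>> bool(np.allclose(cn.mean(axis=0), 0)) and bool(np.allclose(LA.norm(cn, axis=0), 1))
# True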
def get_ctr_norm_np(X, opt=0):
"""
    for dataframes, each column is centered and normalized
    for series, the series is centered and normalized
"""
if X.ndim == 1:
return get_ctr_norm1(X, opt)
elif X.ndim == 2:
return get_ctr_norm2_np(X, opt)
else:
raise TypeError('there is something wrong with the dataset')
def get_ctr_norm2_np(X, opt=0):
"""
    each column (the 2nd dimension) is centered and normalized;
    the 2nd index is kept fixed and the first index runs over the
    coordinates of the vector
opt can be 0, 1, 2
0: returns cn
1: returns (c, cn)
2: returns (c, n, cn)
"""
X_c = X - X.mean(axis=0)
X_cn = X_c / LA.norm(X_c, axis=0)
if opt == 0:
return X_cn
elif opt == 1:
return X_c, X_cn
elif opt == 2:
X_n = X / LA.norm(X, axis=0)
return X_c, X_n, X_cn
else:
raise ValueError('there is no option ' + str(opt))
def get_norm(X):
"""
    for dataframes, each column is normalized
    for series, the series is normalized
"""
if X.ndim == 1:
return get_norm_np(X)
elif X.ndim == 2:
return get_norm2(X)
else:
raise TypeError('there is something wrong with the dataset')
def get_norm2(X):
"""
each column is normalized
"""
return X.divide(LA.norm(X, axis=0), axis=1)
def get_norm_np(X):
"""
    for dataframes, each column is normalized
    for series, the series is normalized
"""
return X / LA.norm(X, axis=0)
def is_mean_0(A, verbose: bool = False) -> bool:
"""
tests if the mean is 0
might be useful to rewrite using the function allclose
"""
A_c = A.mean()
A_max = np.abs(A).max(skipna=False)
if np.sum(A_max) == 0:
raise ValueError('The data is not clean')
zeros = (A_max == 0) | np.isnan(A_max)
# this is the number of places where it is not 0
non_zeros1 = ((A.loc[:, zeros].sum() == 0) - 1).sum()
non_zeros2 = ((A_c[~zeros]/A_max[~zeros] < 1e-15) - 1).sum()
non_zeros3 = ((A_c[~zeros] < 1e-15) - 1).sum()
if verbose is True:
print(non_zeros1, non_zeros2, non_zeros3)
cond1 = (non_zeros1 + non_zeros2 == 0)
cond2 = (non_zeros1 + non_zeros3 == 0)
return cond1 or cond2
def is_norm_1(A: pd.DataFrame, verbose: bool = False) -> bool:
"""
testing if the norm is 1, A is a pandas array here
probably should write a similar function for a pure numpy array
also i imagine there is a better way to test this, maybe something is
already implemented of that sort
YES: would make sense to rewrite with the function allclose
"""
A_max = np.abs(A).max(skipna=False)
if np.sum(A_max) == 0:
raise ValueError('the data is not clean')
zeros = (A_max == 0) | np.isnan(A_max)
norms = LA.norm(A.loc[:, ~zeros], axis=0)
norms_1 = (np.abs((norms - 1)) < 1e-15)
norms_cond = (norms_1 - 1).sum() # number of mismatches
if verbose is True:
print(norms_cond)
return norms_cond == 0
def is_norm_1_np(A: np.ndarray, verbose: bool = False) -> bool:
"""
    testing if the norm is 1, A is a numpy array here
also i imagine there is a better way to test this, maybe something is
already implemented of that sort
YES: would make sense to rewrite with the function allclose
"""
A_max = np.abs(A).max()
if np.sum(A_max) == 0:
raise ValueError('the data is not clean')
zeros = (A_max == 0) | np.isnan(A_max)
norms = LA.norm(A[:, ~zeros], axis=0)
norms_1 = (np.abs((norms - 1)) < 1e-15)
norms_cond = (norms_1 - 1).sum() # number of mismatches
if verbose is True:
print(norms_cond)
return norms_cond == 0
def get_proj_mat(v: Union[pd.DataFrame, np.ndarray]) -> np.ndarray:
"""
gives the projection matrix based of the given column vectors
Parameters
----------
v
Returns
-------
"""
if isinstance(v, pd.DataFrame):
v = v.values
inv = LA.inv(v.T @ v)
return v @ inv @ v.T
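# Worked example (hedged, assumed data): projecting onto the span of the first
# two coordinate axes zeroes out the third component.
# >>> v = np.array([[1.0, 0.0], [0.0, 1.0], [0.0, 0.0]])
# >>> P = get_proj_mat(v)
# >>> bool(np.allclose(P @ np.array([3.0, 4.0, 5.0]), [3.0, 4.0, 0.0]))
# True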
def get_pdf_1(data, bins_pdf, add_point=True, cdf_bool=False, checknan=False):
"""
data is a 1d array
the function gives back the pdf
"""
# =============================================================================
# pdf, _ = np.histogram(corr, bins=len(bins_pdf)-1,
# range=(bins_pdf[0], bins_pdf[-1]))
# =============================================================================
if checknan and np.isnan(data).any(): # if there is a single nan there
pdf = np.full(len(bins_pdf) - 1, np.nan)
else:
pdf, _ = np.histogram(data, bins=bins_pdf)
if cdf_bool:
pdf = np.cumsum(pdf)/len(data)
# adding a duplicated data point useful for the plotting:
if add_point:
pdf = add_point1(pdf)
return pdf
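# Illustrative example (assumed data): histogram counts over the given bins,
# with the last value duplicated for step-style plotting.
# >>> bins_pdf = np.linspace(0.0, 1.0, 5)
# >>> get_pdf_1(np.array([0.1, 0.2, 0.6, 0.9]), bins_pdf)
# array([2, 0, 1, 1, 1])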
def get_pdf_2(data, bins_pdf, add_point=True, cdf_bool=True, checknan=False):
"""
    data is a 2d array; the first dimension indexes the iterations
the function gives back the pdfs
"""
N_iter = len(data)
pdfs = np.zeros((N_iter, len(bins_pdf) - 1))
for i in range(N_iter):
pdfs[i] = get_pdf_1(data[i], bins_pdf, add_point=False,
cdf_bool=False, checknan=checknan)
if cdf_bool:
pdfs = np.cumsum(pdfs, axis=1)/data.shape[1]
if add_point:
pdfs = add_point2(pdfs)
return pdfs
def get_pdf_3(data, bins_pdf, add_point=True, cdf_bool=False,
checknan=False):
"""
    data is a 3d array; the first dimension indexes the iterations and the
    second dimension is usually the cells
    the function gives back the pdf
    add_point option duplicates the last point
    checknan checks if there are any nans in the set and gives nan as
    result for the pdf instead of 0 as would be calculated naturally
"""
N1, N2, N3 = data.shape
pdfs = np.zeros((N1, N2, len(bins_pdf) - 1))
for i in range(N1):
pdfs[i] = get_pdf_2(data[i], bins_pdf, add_point=False,
cdf_bool=False, checknan=checknan)
if cdf_bool:
pdfs = np.cumsum(pdfs, axis=2)/data.shape[2]
if add_point:
pdfs = add_point3(pdfs)
return pdfs
def get_pdf_cdf_1(corr, bins_pdf, bins_cdf, add_point=True, cdf_bool=True,
checknan=False):
"""
corr is a 1d array
the function gives back the pdf and the cdf
"""
# =============================================================================
# pdf, _ = np.histogram(corr, bins=len(bins_pdf)-1,
# range=(bins_pdf[0], bins_pdf[-1]))
# =============================================================================
if checknan and np.isnan(corr).any(): # if there is a single nan there
pdf = np.full(len(bins_pdf) - 1, np.nan)
cdf = np.full(len(bins_cdf) - 1, np.nan)
else:
pdf, _ = np.histogram(corr, bins=bins_pdf)
cdf, _ = np.histogram(corr, bins=bins_cdf)
if cdf_bool:
cdf = np.cumsum(cdf)/len(corr)
# cumfreq is much slower (about 4 folds) because it is calculating the
# linspace at each call
# =============================================================================
# cdf, _, _, _ = SS.cumfreq(corr, numbins=len(bins_cdf)-1,
# defaultreallimits=(bins_cdf[0], bins_cdf[-1]))
# cdf /= len(corr)
# =============================================================================
# adding a duplicated data point useful for the plotting:
if add_point:
pdf = add_point1(pdf)
cdf = add_point1(cdf)
return pdf, cdf
def get_pdf_cdf_2(corr, bins_pdf, bins_cdf, add_point=True, cdf_bool=True,
checknan=False):
"""
    corr is a 2d array; the first dimension indexes the iterations
the function gives back the pdfs and the cdfs
"""
N_iter = len(corr)
pdfs = np.zeros((N_iter, len(bins_pdf) - 1))
cdfs = np.zeros((N_iter, len(bins_cdf) - 1))
for i in range(N_iter):
pdfs[i], cdfs[i] = get_pdf_cdf_1(corr[i], bins_pdf, bins_cdf,
add_point=False, cdf_bool=False,
checknan=checknan)
if cdf_bool:
cdfs = np.cumsum(cdfs, axis=1)/corr.shape[1]
if add_point:
pdfs = add_point2(pdfs)
cdfs = add_point2(cdfs)
return pdfs, cdfs
def get_pdf_cdf_3(corr, bins_pdf, bins_cdf, add_point=True, cdf_bool=True,
checknan=False):
"""
    corr is a 3d array; the first dimension indexes the iterations and the
    second dimension is usually the cells
    the function gives back the pdf and the cdf
    add_point option duplicates the last point
    checknan checks if there are any nans in the set and gives nan as
    result for the pdf and cdf instead of 0 as would be calculated naturally
"""
N1, N2, N3 = corr.shape
pdfs = np.zeros((N1, N2, len(bins_pdf) - 1))
cdfs = np.zeros((N1, N2, len(bins_cdf) - 1))
for i in range(N1):
pdfs[i], cdfs[i] = get_pdf_cdf_2(corr[i], bins_pdf, bins_cdf,
add_point=False, cdf_bool=False,
checknan=checknan)
if cdf_bool:
cdfs = np.cumsum(cdfs, axis=2)/corr.shape[2]
if add_point:
pdfs = add_point3(pdfs)
cdfs = add_point3(cdfs)
return pdfs, cdfs
def get_max_diff(curves, curve1):
"""
curves can be 1D, 2D, 3D.
the max is always done on the last dimension which is the dimension
of one individual curve
curve1 is (are) the mean curve(s)
"""
n_dim = curves.ndim
diffs = curves - curve1
return np.max(diffs, axis=n_dim-1)
def get_min_diff(curves, curve1):
"""
curves can be 1D, 2D, 3D.
the max is always done on the last dimension which is the dimension
of one individual curve
curve1 is (are) the mean curve(s)
returning the absolute deviation
"""
n_dim = curves.ndim
diffs = curves - curve1
return -np.min(diffs, axis=n_dim-1)
def get_entries(A: Union[pd.DataFrame, np.ndarray], diag: bool=False)\
-> np.ndarray:
"""
    A needs to be a square matrix
    returns the entries of the matrix, ignoring the diagonal elements, unless
    diag=True
"""
if isinstance(A, pd.DataFrame):
A = A.values
if diag:
return A.flatten()
else:
idx = ~np.eye(len(A), dtype=bool)
return A[idx]
def is_permutation_matrix(x):
"""[summary]
Arguments:
x {[type]} -- [description]
Returns:
[type] -- [description]
"""
x = np.asanyarray(x)
return (x.ndim == 2 and x.shape[0] == x.shape[1] and
(x.sum(axis=0) == 1).all() and
(x.sum(axis=1) == 1).all() and
((x == 1) | (x == 0)).all())
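# Illustrative example (assumed data, not from the original module):
# >>> bool(is_permutation_matrix([[0, 1], [1, 0]]))
# True
# >>> bool(is_permutation_matrix([[1, 1], [0, 0]]))
# False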
# FLOAT_TYPE = np.float32  # single-precision alternative (kept for reference)
FLOAT_TYPE = np.float64
EPSILON = np.finfo(FLOAT_TYPE).eps
def mat_mul1(A, B, alpha=1):
"""
https://www.benjaminjohnston.com.au/matmul
"""
# return np.matmul(A, B)
return LA.blas.sgemm(alpha, A, B)
def mat_mul2(A, B, alpha=1):
"""
https://www.benjaminjohnston.com.au/matmul
"""
return alpha * np.matmul(A, B)
# return LA.blas.sgemm(alpha, A, B)
if FLOAT_TYPE == np.float32:
mat_mul = mat_mul1
else:
mat_mul = mat_mul2
def mv_mul(A, x, alpha=1):
"""
https://www.benjaminjohnston.com.au/matmul
"""
# return np.matmul(A, B)
return LA.blas.sgemv(alpha, A, x)
def mat_mul_s(A, B, alpha=1):
"""
matrix multiplication where A is a symmetric matrix
i don't see a difference in performance though, maybe didn't try
for matrices large enough
https://www.benjaminjohnston.com.au/matmul
"""
# return np.matmul(A, B)
# return LA.blas.ssymm(alpha, A, B, side=1, c=B, beta=1)
return LA.blas.ssymm(alpha, A, B, side=1)
def get_random_spd_matrix(n_dim: int, eig_vals: np.ndarray) -> np.ndarray:
"""
generates a random symmetric positive definite matrix of size n_dim
and with eig_values given
"""
U = SS.ortho_group.rvs(n_dim)
return U.T @ np.diag(eig_vals) @ U
# other options:
# sklearn.datasets.make_spd_matrix(n_dim, random_state=None)
# using A, a random matrix and A.T @ A
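# Illustrative check (hedged, assumed eigenvalues): the generated matrix is
# symmetric with the requested positive eigenvalues, hence positive definite.
# >>> A = get_random_spd_matrix(3, np.array([3.0, 2.0, 1.0]))
# >>> bool(np.allclose(A, A.T))
# True
# >>> bool(np.allclose(np.sort(np.linalg.eigvalsh(A)), [1.0, 2.0, 3.0]))
# True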
# #############################################################################
# ################## ACTIVITY SVD ANALYSIS FUNCTIONS ##########################
# #############################################################################
def get_pps(df, pps=None):
"""
    return the original, centered, normalized, centered-and-normalized and
    rectified versions of the data, put in a dictionary keyed by
    preprocessing code ('o', 'c', 'n', 'cn', 'r')
"""
df_c, df_n, df_cn = get_ctr_norm(df, opt=2)
df_r = rectify(df)
to_return = {'o': df, 'c': df_c, 'n': df_n, 'cn': df_cn, 'r': df_r}
if pps is None:
pass
else:
to_return = {k: v for k, v in to_return.items() if k in pps}
return to_return
def get_svd_df(act_df, center=False):
"""
    this function returns the SVD, keeping the labels
the elements of the dictionary are U, s and Vh
"""
if center is True:
act_df = act_df.subtract(act_df.mean(axis=1), axis=0)
U, s, Vh = LA.svd(act_df, full_matrices=False)
n = len(s)
# now we want to create the DataFrames that will keep the labels
U_df = pd.DataFrame(U, index=act_df.index, columns=np.arange(1, n+1))
U_df.columns.name = 'PC'
Vh_df = pd.DataFrame(Vh, columns=act_df.columns, index=np.arange(1, n+1))
Vh_df.index.name = 'PC'
s_df = pd.DataFrame(np.diag(s),
columns=np.arange(1, n+1), index=np.arange(1, n+1))
return {'U': U_df, 's': s_df, 'Vh': Vh_df}
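# Minimal usage sketch (assumed activity matrix, not from the original module):
# >>> act = pd.DataFrame(np.random.randn(5, 3),
# ...                    index=['cell%d' % i for i in range(5)],
# ...                    columns=['odor A', 'odor B', 'odor C'])
# >>> svd = get_svd_df(act)
# >>> svd['U'].shape, svd['s'].shape, svd['Vh'].shape
# ((5, 3), (3, 3), (3, 3))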
def sort_by_pc(U, level):
"""
This function takes as input the left or right eigenvectors of the
eigendecomposition of some matrix
    When there are several levels in the MultiIndex, it averages over the
    levels that are not explicitly specified.
"""
if level in U.index.names:
U = U.T
elif level not in U.columns.names:
print("there is a problem sorting")
return (None, None)
if U.columns.nlevels == 2:
sort1 = U.mean(axis=1, level=level).iloc[0].sort_values().index
sort2 = U.mean(axis=1, level=level).iloc[1].sort_values().index
elif U.columns.nlevels == 1 and U.columns.name == level:
sort1 = U.iloc[0].sort_values().index
sort2 = U.iloc[1].sort_values().index
else:
print('there is a problem sorting 2')
(sort1, sort2) = (None, None)
return {1: sort1, 2: sort2}
# #############################################################################
# ############### FUNCTIONS CALCULATING CORR AND SIGNIFICANCE ###############
# #############################################################################
# it is still not clear to me why I am not just shuffling the whole
# 2d matrix; it seems it would be much easier and much quicker.
# I don't understand why I am shuffling each column one by one.
def get_corr(A, B, check=True):
"""
    getting the correlation coefficient between each column of dataframe A
    and each column of dataframe B
    First checking if the labels of the rows are aligned and if each
    column is centered and normalized
    A should have the same (row) indices as B, but they can be ordered
    in a different way; if check is true the rows of A and B are first
    aligned
in the returned matrix, the rows are the columns of A and the columns
are the columns of B
"""
if check:
# aligning the 2 datasets
(A, B) = align_indices(A, B)
if check and ((not is_mean_0(A)) or (not is_mean_0(B))):
raise ValueError('dataset(s) not centralized: \n'
f'{np.abs(np.sum(A)).idxmax()}, {np.abs(np.sum(A)).max()}\n'
f'{np.abs(np.sum(B)).idxmax()}, {np.abs(np.sum(B)).max()}')
if check and ((not is_norm_1(A)) or (not is_norm_1(B))):
raise ValueError('dataset(s) not normalized:\n'
f'{np.abs(LA.norm(A, axis=0)-1)}, '
f'{np.abs(LA.norm(B, axis=0)-1)}')
# now, the data is aligned, centered and normalized, we know
# we can calculate the correlation coefficients
return A.T.dot(B)
# return A.values.T @ B.values
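# Minimal usage sketch (assumed data): columns must be centered and normalized
# first, e.g. with get_ctr_norm; the result has A's columns as rows and B's
# columns as columns.
# >>> A = pd.DataFrame(np.random.randn(10, 2), columns=['a1', 'a2'])
# >>> B = pd.DataFrame(np.random.randn(10, 3), columns=['b1', 'b2', 'b3'])
# >>> get_corr(get_ctr_norm(A), get_ctr_norm(B)).shape
# (2, 3)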
def get_cos_sim(A, B, check=True):
"""
    getting the cos similarity between each column of dataframe A
with each column of dataframe B.
We are only considering positive angles, i.e., between 0 and 1
angles smaller than 0 are switched sign
First checking if the labels of the rows are aligned and if each
column is normalized
"""
if check:
# aligning the 2 datasets
(A, B) = align_indices(A, B)
if check and ((not is_norm_1(A)) or (not is_norm_1(B))):
raise ValueError('dataset(s) not normalized:\n'
f'{np.abs(LA.norm(A, axis=0)-1)}, '
f'{np.abs(LA.norm(B, axis=0)-1)}')
# now, the data is aligned and normalized, we know
# we can calculate the cos similarity by
# return np.abs(A.T.dot(B))
# wondering if one should or not take the abs here
return A.T.dot(B)
# some comments on the significance testing:
# in the general case you will have a collection of column-vectors in matrix A
# and a collection of column vectors in matrix B. You are interested in the
# correlation coefficient between each of these vectors. So we are interested
# in correlations all against all.
# Now, when we are doing the significance testing, we are shuffling
# the vector entries.
# in the optimal case we will calculate the shuffling for each vector
# separately. However, we can make things faster
# I think the next function is only used in some old files
# so i am commenting it out
# =============================================================================
# def test_signif_1(A, B, N=10, verbose=False):
# """
# we assume that the given vectors are already aligned,
# centered and normalized
# for when A is 1D and B is 1D
# """
# corr = A @ B
# corr_shuffled = np.zeros(N)
#
# for i in range(N):
# B1 = np.random.permutation(B)
# corr_shuffled[i] = A @ B1
#
# if verbose is True:
# plt.hist(corr_shuffled, 100)
#
# prob_r = np.sum(corr_shuffled > abs(corr))/N
# prob_l = np.sum(corr_shuffled < -abs(corr))/N
# return (corr, np.mean(corr_shuffled), prob_l, prob_r)
# =============================================================================
# almost similar functions to above but for cos similarity
# CS stands for cos-similarity
def get_signif_general_2(A, B, func, N=10, dist=False):
"""
we assume that the given vectors are already aligned and normalized
A is 2D and B is 1D
A is d x n
B is d
corr, prob_l, prob_r are n
the option dist returns the cosine similarities that are issued
from the shuffling
func is usually either get_corr or get_cos_sim, that's why it is called
general
return 3 types of pvalues:
prob_o, it is the right one-tailed pvalue
    prob_r, the probability that a value from the shuffled distribution
    lies to the right of abs(real_value)
    prob_l, the probability that a value from the shuffled distribution
    lies to the left of -abs(real_value)
"""
# CS = np.abs(B @ A) # only positive values here
CS = func(A, B, check=False)
# CS is a 1D vector, so it is the same as abs(A.T @ B)
# the length of the vector is the same as the number of columns of A, n
CS_shuffled = np.zeros((N, len(A.T))) # N x n
for i in range(N):
B1 = np.random.permutation(B)
CS_shuffled[i] = func(A, B1, check=False)
if dist:
return CS, CS_shuffled # n, N x n
# we need to look at both the right and left probabilities,
# as in the case of SVD, the vectors could be oriented in both
# directions
prob_o = np.mean(CS_shuffled >= CS, axis=0) # o - original: 1 tailed
prob_r = np.mean(CS_shuffled >= np.abs(CS), axis=0)
prob_l = np.mean(CS_shuffled <= -np.abs(CS), axis=0)
return (CS, np.mean(CS_shuffled, axis=0), prob_o, prob_l, prob_r)
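# Minimal sketch (assumed data): a shuffle-based null distribution for the
# correlation between one centered/normalized vector B and each column of A.
# >>> rng = np.random.RandomState(0)
# >>> A = get_ctr_norm_np(rng.standard_normal((50, 4)))
# >>> B = get_ctr_norm_np(rng.standard_normal(50))
# >>> CS, CS_shuff_mean, pv_o, pv_l, pv_r = get_signif_general_2(A, B, get_corr, N=100)
# >>> CS.shape, pv_o.shape
# ((4,), (4,))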
def get_signif_general_v1(A, B, func1, func2, N=10, dist=False):
"""
this version of significance testing is slower, it shuffles each column
of B.
    It would make sense to choose as B the data that has fewer columns,
so that the shuffling procedure would be faster
the option dist returns the correlation coefficients that are issued
from the shuffling, i.e., the full distribution
func1 is usually either get_corr or get_cos_sim
"""
# aligning the 2 datasets, the order in B is kept, A is reordered
A, B = align_indices(A, B)
corr_df = func1(A, B)
# the corr_df will have as rows the columns of A and
# as columns the columns of B
# now that we have the actual correlation coefficient, we can
# calculate the distributions of correlations coefficients
# when the connectivity is shuffled
# in the commented version, it seems that both matrices are shuffled
# =============================================================================
# for i in range(len(U.columns)):
# U_1 = U.iloc[:, i].values
# for j in range(len(con1_df.columns)):
# con_1 = con1_df.iloc[:, j].values
# (corr1, _, _, prob1) = test_signif_1(U_1, con_1, N)
# prob1_df.iloc[i, j] = prob1
# =============================================================================
A_np = A.values
B_np = B.values
if dist:
corr_collection = np.zeros((N, *corr_df.shape))
for i in range(len(B.columns)): # iterating over the columns of B
_, corr_coll = func2(A_np, B_np[:, i], N, dist=True)
corr_collection[:, :, i] = corr_coll
return corr_df, corr_collection
pv_o_df = pd.DataFrame().reindex_like(corr_df)
pv_l_df = pd.DataFrame().reindex_like(corr_df)
pv_r_df = pd.DataFrame().reindex_like(corr_df)
for i in range(len(B.columns)): # iterating over the columns of B
_, _, pv_o, pv_l, pv_r = func2(A_np, B_np[:, i], N, dist=False)
pv_o_df.iloc[:, i] = pv_o
pv_l_df.iloc[:, i] = pv_l
pv_r_df.iloc[:, i] = pv_r
return corr_df, pv_o_df, pv_l_df, pv_r_df
def get_signif_general_v2(A, B, func, N=10, dist=False):
"""
this version of significance testing is much faster than the v1
but it might bring some bias.
    It basically shuffles the whole B matrix at once, and not
    column by column as in the v1 version
the option dist returns the correlation coefficients that are issued
from the shuffling, i.e., the full distribution
theoretically we should have that
pv_o = combine_pval(CS, pv_l, pv_r)
however it is not always exactly the case when CS is negative
because in that case in the combine pval it is
pval_l that is taken, but pval_l was calculated as
np.mean(CS_collection <= -np.abs(CS_df.values), axis=0)
    that means that if certain values in CS_collection are exactly equal
    (which happens when A or B have some sparsity),
    the CS will be flagged as significant when it is not.
In any case pv_o is more correct than the combine and should
always be preferred.
"""
# aligning the 2 datasets
(A, B) = align_indices(A, B)
CS_df = func(A, B, check=True)
A_np = A.values
B_np = B.values
# CS_collection is a 3D matrix, the first dim are the repetitions
# from shuffling, the next 2 dims are the same as for the real CS.
CS_collection = np.zeros((N, *CS_df.shape))
for i in range(N):
CS_collection[i] = func(A_np, np.random.permutation(B_np),
check=False)
if dist:
return CS_df, CS_collection
pv_o_df = | pd.DataFrame() | pandas.DataFrame |
"""
Module contains miscellaneous functions used for reading data, printing logo etc.
"""
import pickle
from random import sample
import networkx as nx
import pandas as pd
def read_testcase(FOLDER):
"""
Reads the GTFS network and preprocessed dict. If the dicts are not present, dict_builder_functions are called to construct them.
Returns:
stops_file (pandas.dataframe): stops.txt file in GTFS.
trips_file (pandas.dataframe): trips.txt file in GTFS.
stop_times_file (pandas.dataframe): stop_times.txt file in GTFS.
transfers_file (pandas.dataframe): dataframe with transfers (footpath) details.
stops_dict (dict): keys: route_id, values: list of stop id in the route_id. Format-> dict[route_id] = [stop_id]
stoptimes_dict (dict): keys: route ID, values: list of trips in the increasing order of start time. Format-> dict[route_ID] = [trip_1, trip_2] where trip_1 = [(stop id, arrival time), (stop id, arrival time)]
footpath_dict (dict): keys: from stop_id, values: list of tuples of form (to stop id, footpath duration). Format-> dict[stop_id]=[(stop_id, footpath_duration)]
        routes_by_stop_dict (dict): keys: stop_id, values: list of routes passing through the stop_id. Format-> dict[stop_id] = [route_id]
idx_by_route_stop_dict (dict): preprocessed dict. Format {(route id, stop id): stop index in route}.
"""
import gtfs_loader
from dict_builder import dict_builder_functions
stops_file, trips_file, stop_times_file, transfers_file = gtfs_loader.load_all_db(FOLDER)
try:
stops_dict, stoptimes_dict, footpath_dict, routes_by_stop_dict, idx_by_route_stop_dict = gtfs_loader.load_all_dict(FOLDER)
except FileNotFoundError:
stops_dict = dict_builder_functions.build_save_stops_dict(stop_times_file, trips_file, FOLDER)
stoptimes_dict = dict_builder_functions.build_save_stopstimes_dict(stop_times_file, trips_file, FOLDER)
routes_by_stop_dict = dict_builder_functions.build_save_route_by_stop(stop_times_file, FOLDER)
footpath_dict = dict_builder_functions.build_save_footpath_dict(transfers_file, FOLDER)
idx_by_route_stop_dict = dict_builder_functions.stop_idx_in_route(stop_times_file, FOLDER)
return stops_file, trips_file, stop_times_file, transfers_file, stops_dict, stoptimes_dict, footpath_dict, routes_by_stop_dict, idx_by_route_stop_dict
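# Minimal usage sketch ("anytown" is a placeholder network folder name, not
# part of the original repository):
# (stops_file, trips_file, stop_times_file, transfers_file, stops_dict,
#  stoptimes_dict, footpath_dict, routes_by_stop_dict,
#  idx_by_route_stop_dict) = read_testcase("anytown")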
def print_logo():
"""
Prints the logo
"""
print("""
****************************************************************************************
* TRANSIT ROUTING ALGORITHMS *
* <NAME> <NAME> *
* (<EMAIL>) (<EMAIL>) *
****************************************************************************************
""")
return None
def print_network_details(transfers_file, trips_file, stops_file):
"""
Prints the network details like number of routes, trips, stops, footpath
Args:
transfers_file (pandas.dataframe):
trips_file (pandas.dataframe):
stops_file (pandas.dataframe):
Returns: None
"""
print("___________________________Network Details__________________________")
print("| No. of Routes | No. of Trips | No. of Stops | No. of Footapths |")
print(
f"| {len(set(trips_file.route_id))} | {len(set(trips_file.trip_id))} | {len(set(stops_file.stop_id))} | {len(transfers_file)} |")
print("____________________________________________________________________")
return None
def print_query_parameters(SOURCE, DESTINATION, D_TIME, MAX_TRANSFER, WALKING_FROM_SOURCE, variant, no_of_partitions=None,
weighting_scheme=None, partitioning_algorithm=None):
"""
Prints the input parameters related to the shortest path query
Args:
        SOURCE (int): stop-id of the SOURCE stop
        DESTINATION (int/list): stop-id of the DESTINATION stop. For One-To-Many algorithms, this is a list.
D_TIME (pandas.datetime): Departure time
MAX_TRANSFER (int): Max transfer limit
WALKING_FROM_SOURCE (int): 1 or 0. 1 means walking from SOURCE is allowed.
variant (int): variant of the algorithm. 0 for normal version,
1 for range version,
2 for One-To-Many version,
3 for Hyper version
no_of_partitions: number of partitions network has been divided into
        weighting_scheme: which weighting scheme has been used to generate partitions.
partitioning_algorithm: which algorithm has been used to generate partitions.
Returns: None
"""
print("___________________Query Parameters__________________")
print("Network: Switzerland")
print(f"SOURCE stop id: {SOURCE}")
print(f"DESTINATION stop id: {DESTINATION}")
print(f"Maximum Transfer allowed: {MAX_TRANSFER}")
print(f"Is walking from SOURCE allowed ?: {WALKING_FROM_SOURCE}")
if variant == 2 or variant == 1:
print(f"Earliest departure time: 24 hour (Profile Query)")
else:
print(f"Earliest departure time: {D_TIME}")
if variant == 4:
print(f"Number of partitions: {no_of_partitions}")
print(f"Partitioning Algorithm used: {partitioning_algorithm}")
print(f"Weighing scheme: {weighting_scheme}")
print("_____________________________________________________")
return None
def read_partitions(stop_times_file, FOLDER, no_of_partitions, weighting_scheme, partitioning_algorithm):
"""
Reads the fill-in information.
Args:
stop_times_file (pandas.dataframe): dataframe with stoptimes details
FOLDER (str): path to network folder.
no_of_partitions (int): number of partitions network has been divided into.
        weighting_scheme (str): which weighting scheme has been used to generate partitions.
partitioning_algorithm (str):which algorithm has been used to generate partitions. Currently supported arguments are hmetis or kahypar.
Returns:
stop_out (dict) : key: stop-id (int), value: stop-cell id (int). Note: if stop-cell id of -1 denotes cut stop.
route_groups (dict): key: tuple of all possible combinations of stop cell id, value: set of route ids belonging to the stop cell combination
cut_trips (set): set of trip ids that are part of fill-in.
trip_groups (dict): key: tuple of all possible combinations of stop cell id, value: set of trip ids belonging to the stop cell combination
"""
import itertools
if partitioning_algorithm == "hmetis":
route_out = pd.read_csv(f'./partitions/{FOLDER}/routeout_{weighting_scheme}_{no_of_partitions}.csv',
usecols=['path_id', 'group']).groupby('group')
stop_out = pd.read_csv(f'./partitions/{FOLDER}/cutstops_{weighting_scheme}_{no_of_partitions}.csv', usecols=['stop_id', 'g_id'])
fill_ins = pd.read_csv(f'./partitions/{FOLDER}/fill_ins_{weighting_scheme}_{no_of_partitions}.csv')
elif partitioning_algorithm == "kahypar":
route_out = pd.read_csv(f'./kpartitions/{FOLDER}/routeout_{weighting_scheme}_{no_of_partitions}.csv', usecols=['path_id', 'group']).groupby('group')
stop_out = pd.read_csv(f'./kpartitions/{FOLDER}/cutstops_{weighting_scheme}_{no_of_partitions}.csv', usecols=['stop_id', 'g_id']).astype(int)
fill_ins = | pd.read_csv(f'./kpartitions/{FOLDER}/fill_ins_{weighting_scheme}_{no_of_partitions}.csv') | pandas.read_csv |
import os
import sys
sys.path.append(os.path.join(os.getcwd().split('xtraderbacktest')[0],'xtraderbacktest'))
import datetime
import modules.common.scheduler
import modules.other.logg
import logging
import modules.price_engine.price_loader as price_loader
import modules.other.sys_conf_loader as sys_conf_loader
import modules.price_engine.ticks_generater as ticks_generater
import modules.price_engine.price_period_converter as price_period_converter
import modules.other.date_converter as date_converter
import modules.backtest.save_backtest_result as save_backtest_result
import modules.backtest.backtest_result_analyse as backtest_result_analyse
import modules.price_engine.tick_loader as tick_loader
import modules.backtest.calendar_manager
import pandas as pd
from tqdm import tqdm
#from tqdm.auto import tqdm
import queue
import threading
import time
import numpy as np
TIMESTAMP_FORMAT = sys_conf_loader.get_sys_conf()["timeformat"]
class Scheduler(modules.common.scheduler.Scheduler):
def __init__(self,mode):
self.mode = mode
self.fake_tick = sys_conf_loader.get_sys_conf()["backtest_conf"]["tick_mode"]["is_fake"]
self.strategy = None
self.tick_queue = queue.Queue()
self.stop_by_error = False
self._calendar_manager = None
def register_strategy(self,strategy):
self.strategy = strategy
self.strategy._set_mode("backtest")
self.backtest_graininess = self.strategy.context["backtest_graininess"]
if self.strategy.context["pre_post_market"] == "enable":
self.use_pre_post_market_data = True
else:
self.use_pre_post_market_data = False
self.ohlc = OHLCManager(mode = sys_conf_loader.get_sys_conf()["backtest_conf"]["price_data_mode"]["mode"],symbols = strategy.context["symbols"],fr = self.strategy.context["start_date"],to = self.strategy.context["end_date"],graininess=self.backtest_graininess,pre_post_market=self.use_pre_post_market_data)
self.strategy.init()
def _generate_queue(self,fr,to):
# generate fake ticks
logging.info("Processing data before running backtest.")
# Get the set of date_list first
date_set = set()
with tqdm(total=len(self.ohlc.keys()),desc="Processing Data",colour ="green", ascii=True) as bar:
for symbol in self.ohlc.keys():
df = self.ohlc.get(symbol).copy()
df = df[(df.index >= pd.to_datetime(fr)) & (df.index <= pd.to_datetime(to))].copy()
date_set.update(pd.to_datetime(df.index.values).tolist())
bar.update(1)
bar.close()
# freq = date_converter.convert_period_to_seconds_pandas(self.backtest_graininess)
# per1 = pd.date_range(start =fr, end =to, freq = freq)
# for val in per1:
# date_set.add(val)
date_set = sorted(date_set)
logging.info("Symbol length "+ str(len(self.ohlc.keys())) + " Date Length " + str(len(date_set)))
display_dict = {
"date":""
}
with tqdm(total= len(date_set),desc="Tick Generator",colour ="green", ascii=True,postfix = display_dict,) as process_tick_bar:
for date in date_set:
temp_ticks = {}
for symbol in self.ohlc.keys():
if date in self.ohlc.get(symbol).index:
date_str = str(date)
if date_str not in temp_ticks.keys():
temp_ticks[date_str] = []
row = self.ohlc.get(symbol).loc[date]
fake_ticks = ticks_generater.generate_fake_ticks(symbol,date,row)
temp_ticks[date_str].extend(fake_ticks)
else:
#print(date,"not in self.ohlc.get(symbol).index")
pass
# sort the temp ticks
for date_str in temp_ticks.keys():
temp_ticks[date_str] = sorted(temp_ticks[date_str], key=lambda k: k['date'])
if self.stop_by_error is True:
break
# put into queue
for date_str in temp_ticks.keys():
for item in temp_ticks[date_str]:
self.tick_queue.put(item)
while(self.tick_queue.qsize() > 50000):
time.sleep(1)
process_tick_bar.update(1)
display_dict = {
"date":str(date)
}
process_tick_bar.set_postfix(display_dict)
process_tick_bar.close()
self.tick_queue.put({"end":"end"})
def _loop_ticks(self,last_min,total_ticks):
# loop ticks
logging.info("Start looping ticks.")
display_dict = {
"deposit":str(round(self.strategy.order_manager.position.deposit,2)),
"total_pnl ":str(round(self.strategy.order_manager.position.deposit - self.strategy.order_manager.position._init_deposit,2)),
"float_pnl ":str(round(self.strategy.order_manager.position.float_pnl,2)),
"date":""
}
with tqdm(total=total_ticks,desc="Tick Looper", postfix = display_dict, colour="green", ascii=True) as loop_tick_bar:
try:
tick = {"start":"start"}
last_ticks = {}
while("end" not in tick.keys()):
while(self.tick_queue.empty()):
time.sleep(0.2)
tick = self.tick_queue.get()
if "end" not in tick.keys():
date_str = tick["date"][0:10]
if self._calendar_manager is None and self.strategy.context["calendar_event"] == "enable":
self._calendar_manager = modules.backtest.calendar_manager.CalendarManager(tick["date"])
calendar_event_list = self._calendar_manager.get_events()
self.strategy.calendar_list.extend(calendar_event_list)
                        # hand off to the strategy's internal round-check function to handle basic info, such as datetime
self.strategy._round_check_before(tick)
try:
self.strategy.handle_tick(tick)
except Exception as e:
self.stop_by_error = True
logging.error("Error in handle tick.")
logging.exception(e)
                        # hand off to the strategy's internal round-check function to handle order management, calculations, etc.
new_bars,new_grainness = self.strategy._round_check_after(tick)
if new_grainness and self.strategy.context["calendar_event"] == "enable":
calendar_event_list = self._calendar_manager.round_check(tick["date"])
if len(calendar_event_list) > 0:
for event in calendar_event_list:
e = {
"type": "calendar",
"body":event
}
self.strategy.handle_event(e)
self.strategy.calendar_list.extend(calendar_event_list)
# if there is a new bar for the timeframe specified by strategy
if len(new_bars) > 0 :
for new_bar in new_bars:
# hand it to the strategy's logic to process the new bar
new_bar_dict = {
"open":new_bar.open,
"high":new_bar.high,
"close":new_bar.close,
"low":new_bar.low,
"date":new_bar.date,
"symbol":new_bar.symbol,
"volume":new_bar.volume,
"open_interest":new_bar.open_interest,
"period":new_bar.period,
}
try:
self.strategy.handle_bar(new_bar_dict,new_bar_dict["period"])
except Exception as e:
self.stop_by_error = True
logging.error("Error in handle bar.")
logging.exception(e)
# hand off to the strategy's internal func for order handling, calculations, etc.
self.strategy._round_check_before(tick)
self.strategy._update_position()
self.strategy._round_check_after_day(tick)
loop_tick_bar.update(1)
display_dict = {
"margin_rate":str(round(self.strategy.order_manager.position.get_margin_rate()*100,2)) + '%',
"deposit":str(round(self.strategy.order_manager.position.deposit,2)),
"total_pnl ":str(round(self.strategy.order_manager.position.deposit - self.strategy.order_manager.position._init_deposit,2)),
"float_pnl ":str(round(self.strategy.order_manager.position.float_pnl,2)),
"date":tick["date"]
}
loop_tick_bar.set_postfix(display_dict)
last_ticks[tick["symbol"]] = tick
# when the run comes to an end
self.strategy.close_all_position()
self.strategy.withdraw_pending_orders()
for symbol in last_ticks.keys():
self.strategy._round_check_after(last_ticks[symbol])
except Exception as e:
self.stop_by_error = True
logging.error("Internal Error.")
logging.exception(e)
loop_tick_bar.close()
def _send_real_ticks(self,real_ticks):
with tqdm(total=len(real_ticks),desc="Tick Sender",colour="green", ascii=True) as loop_tick_bar:
for tick in real_ticks:
self.tick_queue.put(tick)
loop_tick_bar.update(1)
loop_tick_bar.close()
self.tick_queue.put({"end":"end"})
def start(self):
logging.info("Backtest Start.")
if self.strategy is None:
logging.error("There is no registered strategy.")
return
# get all symbols that the backtest needs.
symbols = self.strategy.context["symbols"]
# get the time from and to
fr = self.strategy.context["start_date"]
to = self.strategy.context["end_date"]
if self.fake_tick is False:
# get real ticks
real_ticks = []
for symbol in self.ohlc.keys():
real_ticks.extend(tick_loader.load_ticks(symbol,fr,to))
# sort the real_ticks
real_ticks = sorted(real_ticks, key=lambda k: k['date'])
tick_t = threading.Thread(target = self._send_real_ticks,args=(real_ticks,))
tick_t.start()
else:
tick_t = threading.Thread(target = self._generate_queue,args=(fr,to))
tick_t.start()
# preload the dataframe into strategy
logging.info("Preloading ohlc into strategy")
with tqdm(total=len(self.ohlc.keys()),desc="Preloading ohlc",colour="green", ascii=True) as bar:
for symbol in self.ohlc.keys():
df = self.ohlc.get(symbol).copy()
df = df[(df.index < pd.to_datetime(fr))].copy(deep = True)
self.strategy._preload_data(symbol,df)
bar.update(1)
bar.close()
# start tick processing thread
date_set = set()
for symbol in self.ohlc.keys():
df = self.ohlc.get(symbol).copy()
df = df[(df.index >= pd.to_datetime(fr)) & (df.index <= pd.to_datetime(to))].copy()
import argparse
import sys
import numpy as np
import pandas as pds
import tensorflow as tf
sys.path.append("clairvoyance")
from datasets import dataset # type: ignore # noqa: E402
from preprocessing import ProblemMaker # type: ignore # noqa: E402
from treatments.treatments import treatment_effects_model # type: ignore # noqa: E402
from clair_helper import get_clair_data, silence_tf # noqa: E402
silence_tf()
parser = argparse.ArgumentParser("Clair Benchmark")
parser.add_argument("--seed", type=str, default="100")
parser.add_argument("--batch_size", type=str, default="100")
parser.add_argument("--sim_id", type=str)
parser.add_argument("--model_name", type=str, default="CRN", choices=["CRN", "RMSN", "GARNITE"])
parser.add_argument("--max_alpha", type=str, default="1.0")
parser.add_argument("--n_hidden", type=str, default="128")
args = parser.parse_args()
seed = int(args.seed)
batch_size = int(args.batch_size)
sim_id = args.sim_id
model_name = args.model_name
max_alpha = float(args.max_alpha)
n_hidden = int(args.n_hidden)
df, df_static, max_seq_len, projection_horizon, n_units, n_units_total, treatment_effect = get_clair_data(
seed, sim_id, "test"
)
tf.set_random_seed(seed)
df_train, df_static_train, _, _, _, _, _ = get_clair_data(seed, sim_id, "train")
df_val, df_static_val, _, _, _, _, _ = get_clair_data(seed, sim_id, "val")
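# Shift the validation IDs by a large constant so they cannot collide with the training IDs
# once the two frames are concatenated below.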
df_static_val["id"] = df_static_val["id"] + 1e6
df_val["id"] = df_val["id"] + 1e6
df_static_train = pds.concat([df_static_train, df_static_val], ignore_index=True)
import re
import json
import os
import string
import pickle
import datetime
import ipdb
import pandas as pd
from reuter_data import logging
NEWS_MONTH = ['07', '08', '09', '10']
NEWS_NUMBER = [14793, 11978, 11337, 9743]
def clean_sentence(s):
s = re.sub("\n", " ", s)
s = re.sub("[" + string.punctuation + "]", " ", s)
s = re.sub(" +", " ", s)
return s.strip()
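# Illustrative example (hypothetical input) of the cleaning steps above:
#   clean_sentence("Stocks rallied,\nup 2%!!") -> "Stocks rallied up 2"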
def collect_webhose_news():
df = pd.DataFrame()
for i, month in enumerate(NEWS_MONTH):
for index in range(1, NEWS_NUMBER[i] + 1):
logging('{0:s}: {1:05d}/{2:05d}'.format(month, index, NEWS_NUMBER[i]))
news_path = "data/{0:s}/news_{1:07d}.json".format(month, index)
with open(news_path) as f:
datum_json = json.load(f)
datum = pd.Series(
data={
'text':datum_json['text'],
'published_time':datum_json['published'],
'country':datum_json['thread']['country'],
'title':datum_json['thread']['title'],
'site':datum_json['thread']['site']
}
)
df = df.append(datum, ignore_index=True)
df.to_csv('webhose_data.csv', index=False)
ipdb.set_trace()
def read_news_dataframe(news_per_day):
df = pd.DataFrame()
date_news_count = {}
for i, month in enumerate(NEWS_MONTH):
for index in range(NEWS_NUMBER[i]):
news_path = "data/{0:s}/news_{1:07d}.json".format(month, NEWS_NUMBER[i] - index)
with open(news_path) as f:
datum_json = json.load(f)
publish_time = pd.to_datetime(datum_json['published'])
date_str = publish_time.strftime('%Y-%m-%d')
start_time = datetime.datetime.strptime('{0:s} 0930'.format(date_str), '%Y-%m-%d %H%M')
end_time = datetime.datetime.strptime('{0:s} 1600'.format(date_str), '%Y-%m-%d %H%M')
if date_str not in date_news_count:
date_news_count[date_str] = 0
if date_news_count[date_str] > news_per_day:
continue
if publish_time <= start_time or publish_time >= end_time:
continue
if datum_json['thread']['country'] != 'US' or 'finance' not in str(datum_json):
continue
text = clean_sentence(datum_json['text'])
if len(text.split(' ')) < 100:
continue
date_news_count[date_str] += 1
datum = pd.Series(
data={
'text':text,
'date':date_str
}
)
df = df.append(datum, ignore_index=True)
#pickle.dump(date_news_count, open("data/date_news_count.p", "wb"))
return df
def read_SNP_dataframe():
snp = pd.read_csv("data/GSPC.csv")
snp['target'] = pd.Series('2015-06-30').append(snp['Date'][0:-1], ignore_index=True)
for offset in range(1, 6):
name = 'previous_price_{0:d}'.format(offset)
snp[name] = pd.Series([None for _ in range(offset)]).append(snp['Close'][0:-offset] - snp['Open'][0:-offset], ignore_index=True)
return snp
def find_price(ticker, timestamp):
date_str = timestamp.strftime('%Y%m%d')
path = 'data/SNP/{0:s}/price/price_{1:s}.json'.format(ticker, date_str)
if not os.path.exists(path):
return None
with open(path, 'rb') as f:
data = pickle.load(f)
start_time = datetime.datetime.strptime(date_str + ' 09:30', '%Y%m%d %H:%M')
offset = int((timestamp - start_time).total_seconds() // 60)
return data[offset]
def generate_reuter_price():
reuter = pd.read_csv('reuter_data.csv')
reuter['published_time'] = pd.to_datetime(reuter['published_time'])
reuter.sort_values('published_time', inplace=True)
twenty_min = datetime.timedelta(minutes=20)
df = pd.DataFrame()
for _, article in reuter.iterrows():
ticker = article['ticker']
published_time = article['published_time']
date_str = published_time.strftime('%Y%m%d')
start_time = datetime.datetime.strptime('{0:s} 0930'.format(date_str), '%Y%m%d %H%M')
end_time = datetime.datetime.strptime('{0:s} 1600'.format(date_str), '%Y%m%d %H%M')
predicted_time = published_time + twenty_min
if published_time > start_time and published_time < end_time and \
predicted_time > start_time and predicted_time < end_time:
price = find_price(ticker, published_time)
predicted_price = find_price(ticker, predicted_time)
if price is None or predicted_price is None:
continue
datum = pd.Series(
data={
'text':clean_sentence(article['text']),
'published_time':published_time,
'predicted_time':predicted_time,
'price':price['marketHigh'],
'predicted_price':predicted_price['marketLow'],
'ticker':article['ticker'],
'name':article['name'],
'title':article['title']
}
)
df = df.append(datum, ignore_index=True)
df.to_csv('reuter_price.csv', index=False)
ipdb.set_trace()
def find_day_price(ticker, timestamp):
date_str = timestamp.strftime('%Y-%m-%d')
path = 'data/price_5y/{0:s}.json'.format(ticker)
if not os.path.exists(path):
return None
with open(path, 'rb') as f:
data = pickle.load(f)
start_time = datetime.datetime.strptime('2015-06-30', '%Y-%m-%d')
target_time = datetime.datetime.strptime(date_str, '%Y-%m-%d')
offset = int((target_time - start_time).days)
if offset > len(data):
offset = len(data) - 1
elif offset < 0:
offset = 0
lower = False
higher = False
while offset >= 0 and offset < len(data) and (not lower or not higher):
guess_time = datetime.datetime.strptime(data[offset]['date'], '%Y-%m-%d')
if guess_time == target_time:
return data[offset]
elif guess_time > target_time:
offset -= 1
higher = True
else:
offset += 1
lower = True
return None
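# Usage sketch (hypothetical ticker/date): find_day_price('AAPL', datetime.datetime(2017, 8, 15))
# estimates an index offset from 2015-06-30 and then walks up or down until the record's date
# matches the target, returning None when the target is not a trading day in the cached data.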
def generate_webhose_price_trend():
webhose = pd.read_csv('webhose_label.csv')
import argparse
import time
from timm.utils.metrics import mAP_score
from torch._C import dtype
import yaml
import os
import logging
from collections import OrderedDict
from contextlib import suppress
from datetime import datetime
import torch
import torch.nn as nn
import torchvision.utils
from torch.nn.parallel import DistributedDataParallel as NativeDDP
import numpy as np
from timm.data import create_loader, resolve_data_config, Mixup, FastCollateMixup, AugMixDataset
from timm.models import create_model, resume_checkpoint, load_checkpoint, convert_splitbn_model
from timm.utils import *
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy, JsdCrossEntropy
from timm.optim import create_optimizer
from timm.scheduler import create_scheduler
from timm.utils import ApexScaler, NativeScaler
# from timm.data import LoadImagesAndLabels,preprocess,LoadImagesAndLabelsV2,LoadImagesAndSoftLabels
from timm.utils import ApexScaler, auc_score
from timm.utils import Visualizer
from timm.data import get_riadd_train_transforms, get_riadd_valid_transforms,get_riadd_test_transforms
from timm.data import RiaddDataSet,RiaddDataSet9Classes
import os
from tqdm import tqdm
import random
import torch.distributed as dist
try:
from apex import amp
from apex.parallel import DistributedDataParallel as ApexDDP
from apex.parallel import convert_syncbn_model
has_apex = True
except ImportError:
has_apex = False
has_native_amp = False
try:
if getattr(torch.cuda.amp, 'autocast') is not None:
has_native_amp = True
except AttributeError:
pass
#os.environ["CUDA_VISIBLE_DEVICES"] = "4,5,6,7"
CFG = {
'seed': 42,
'img_size': 224,
'valid_bs': 10,
'num_workers': 4,
'num_classes': 29,
'tta': 3,
'models': [#'b6-ns-768/tf_efficientnet_b6_ns-768-fold0-model_best.pth.tar',
#'b5-ns-960/tf_efficientnet_b5_ns-960-fold0-model_best.pth.tar',
'20210910-205105-vit_base_patch16_384-384/model_best.pth.tar'],
'base_img_path': 'C:/Users/AI/Desktop/student_Manuel/datasets/RIADD_cropped/Evaluation_Set/Evaluation',
'weights': [1]
}
def seed_everything(seed):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = True
def validate(model, loader):
model.eval()
preds = []
pbar = tqdm(enumerate(loader), total=len(loader))
with torch.no_grad():
for batch_idx, (input, target) in pbar:
input = input.cuda()
target = target.cuda()
target = target.float()
output = model(input)
preds.append(output.sigmoid().to('cpu').numpy())
predictions = np.concatenate(preds)
return predictions
if __name__ == '__main__':
from sklearn.model_selection import KFold,StratifiedKFold,GroupKFold
import pandas as pd
import torch.utils.data as data
seed_everything(CFG['seed'])
data_ = pd.read_csv('C:/Users/AI/Desktop/student_Manuel/datasets/RIADD_cropped/Evaluation_Set/RFMiD_Validation_Labels.csv')
import os, codecs
import pandas as pd
import numpy as np
PATH = '../input/'
# Shared-bike trajectory data
bike_track = pd.concat([
pd.read_csv(PATH + 'gxdc_gj20201221.csv'),
#coding=utf-8
import pandas as pd
import numpy as np
import sys
import os
from sklearn import preprocessing
import datetime
import scipy as sc
from sklearn.preprocessing import MinMaxScaler,StandardScaler
from sklearn.externals import joblib
#import joblib
class FEbase(object):
"""description of class"""
def __init__(self, **kwargs):
pass
def create(self,*DataSetName):
#print (self.__class__.__name__)
(filepath, tempfilename) = os.path.split(DataSetName[0])
(filename, extension) = os.path.splitext(tempfilename)
#bufferstring='savetest2017.csv'
bufferstringoutput=filepath+'/'+filename+'_'+self.__class__.__name__+extension
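# Cache behaviour: the engineered-feature file is written next to the input as
# <input name>_<subclass name><ext>; if it already exists, core() is skipped and
# the cached path is returned as-is.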
if(os.path.exists(bufferstringoutput)==False):
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
df_all=self.core(DataSetName)
df_all.to_csv(bufferstringoutput)
return bufferstringoutput
def core(self,df_all,Data_adj_name=''):
return df_all
def real_FE():
return 0
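# Usage sketch (hypothetical file names; core() of the subclasses below indexes five aligned CSVs:
# daily bars, adjustment factors, price limits, moneyflow, and the "long" extras):
#   fe = FE_a23()
#   train_csv = fe.create('daily.csv', 'adj.csv', 'limit.csv', 'moneyflow.csv', 'long.csv')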
class FEg30eom0110network(FEbase):
#This version switches to 3-day prediction
def __init__(self):
pass
def core(self,DataSetName):
intflag=True
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
#Whether the stock is ST or otherwise abnormal
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
##Exclude STAR Market (688) stocks
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#Price after applying the adjustment factor
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
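# The *19.9//1 trick buckets the percentile rank into 20 integer bins (0-19).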
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
if(intflag):
df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
if(intflag):
df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
if(intflag):
df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min',True)
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min',True)
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max',True)
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max',True)
df_all,_=FEsingle.HighLowRange(df_all,8,True)
df_all,_=FEsingle.HighLowRange(df_all,25,True)
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
#Whether the price limit was hit
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
###Real price range (distinguish actual high vs. low share prices)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
#1-day
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
if(intflag):
df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
if(intflag):
df_all['pct_chg_abs_rank']=df_all['pct_chg_abs_rank']*10//2
df_all=FEsingle.PctChgAbsSumRank(df_all,6,True)
df_all=FEsingle.PctChgSumRank(df_all,3,True)
df_all=FEsingle.PctChgSumRank(df_all,6,True)
df_all=FEsingle.PctChgSumRank(df_all,12,True)
df_all=FEsingle.AmountChgRank(df_all,12,True)
#df_all=FEsingle.AmountChgRank(df_all,30)
#Compute ranks for the three price ratios
dolist=['open','high','low']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
if(intflag):
df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
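# PredictDaysTrend(df_all, 5) presumably derives the target label from the trend over the next
# 5 bars; the FEsingle helpers used throughout this file are assumed to be defined elsewhere.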
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
#Drop stocks whose price is too low
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>18]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
#Columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
class FEg30eom0110onlinew6d(FEbase):
#This version switches to 3-day prediction
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all['sm_amount_pos']=df_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['lg_amount_pos']=df_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['net_mf_amount_pos']=df_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['sm_amount_pos']=df_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_all['lg_amount_pos']=df_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_all['net_mf_amount_pos']=df_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_all['sm_amount']=df_all.groupby('ts_code')['sm_amount'].shift(1)
df_all['lg_amount']=df_all.groupby('ts_code')['lg_amount'].shift(1)
df_all['net_mf_amount']=df_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
#Whether the stock is ST or otherwise abnormal
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
##Exclude STAR Market (688) stocks
#print(df_all)
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#Price after applying the adjustment factor
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
#Whether the price limit was hit
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
###Real price range (distinguish actual high vs. low share prices)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
#1-day
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
#df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
#Compute ranks for the three price ratios
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
#Drop stocks whose price is too low
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>18]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
#Columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
class FE_a23(FEbase):
#This version switches to 3-day prediction
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
#df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
#df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
#df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
#Whether the stock is ST or otherwise abnormal
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
##Exclude STAR Market (688) stocks
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#Price after applying the adjustment factor
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
#Whether the price limit was hit
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
###Real price range (distinguish actual high vs. low share prices)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
#1-day
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.PctChgSum(df_all,24)
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
#Compute ranks for the three price ratios
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
#Drop stocks whose price is too low
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>18]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
#Columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
#Planned version for the new model
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
#Question mark here: revisit this
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
df_all['ts_code'] = df_all['ts_code'].astype('str') #convert the original int dtype to string
df_all['ts_code'] = df_all['ts_code'].str.zfill(6) #the .str accessor must be used here
print(df_all)
##Exclude STAR Market (688) stocks
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#Price after applying the adjustment factor
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
#Whether the price limit was hit
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
#1-day
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
#Compute ranks for the three price ratios
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
#Drop stocks whose market cap is too low
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#Columns not used for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
dwdw=1
class FE_a29(FEbase):
#This version switches to 3-day prediction
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
#df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
#df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
#df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
#df_money_all['sm_amount_25_diff']=df_money_all['sm_amount_25']-df_money_all['sm_amount_12']
#df_money_all['sm_amount_12_diff']=df_money_all['sm_amount_12']-df_money_all['sm_amount_5']
#df_money_all['lg_amount_25_diff']=df_money_all['lg_amount_25']-df_money_all['lg_amount_12']
#df_money_all['lg_amount_12_diff']=df_money_all['lg_amount_12']-df_money_all['lg_amount_5']
#df_money_all['net_mf_amount_25_diff']=df_money_all['net_mf_amount_25']-df_money_all['net_mf_amount_12']
#df_money_all['net_mf_amount_12_diff']=df_money_all['net_mf_amount_12']-df_money_all['net_mf_amount_5']
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
#Whether the stock is ST or otherwise abnormal
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
##Exclude STAR Market (688) stocks
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#Price after applying the adjustment factor
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all['25_pct_rank_min_diff']=df_all['25_pct_rank_min']-df_all['12_pct_rank_min']
df_all['12_pct_rank_min_diff']=df_all['12_pct_rank_min']-df_all['5_pct_rank_min']
df_all['25_pct_rank_max_diff']=df_all['25_pct_rank_max']-df_all['12_pct_rank_max']
df_all['12_pct_rank_max_diff']=df_all['12_pct_rank_max']-df_all['5_pct_rank_max']
df_all['25_pct_Rangerank_diff']=df_all['25_pct_Rangerank']-df_all['12_pct_Rangerank']
df_all['12_pct_Rangerank_diff']=df_all['12_pct_Rangerank']-df_all['5_pct_Rangerank']
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
#Whether the price limit was hit
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
###Real price range (distinguish actual high vs. low share prices)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
#1-day
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.PctChgSum(df_all,24)
df_all['chg_rank_24_diff']=df_all['chg_rank_24']-df_all['chg_rank_12']
df_all['chg_rank_12_diff']=df_all['chg_rank_12']-df_all['chg_rank_6']
df_all['chg_rank_6_diff']=df_all['chg_rank_6']-df_all['chg_rank_3']
df_all['pct_chg_24_diff']=df_all['pct_chg_24']-df_all['pct_chg_12']
df_all['pct_chg_12_diff']=df_all['pct_chg_12']-df_all['pct_chg_6']
df_all['pct_chg_6_diff']=df_all['pct_chg_6']-df_all['pct_chg_3']
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
#Compute ranks for the three price ratios
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
#Drop stocks whose price is too low
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>15]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
#Columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
#df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
#Planned version for the new model
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
#Question mark here: revisit this
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
df_all['ts_code'] = df_all['ts_code'].astype('str') #convert the original int dtype to string
df_all['ts_code'] = df_all['ts_code'].str.zfill(6) #zero-pad to 6 digits; string methods need the .str accessor
print(df_all)
##exclude STAR Market (688) tickers
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#adjusted price (close * adj_factor)
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
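#real_price is rebuilt from the previous day's adjusted close scaled by today's pct_chg,
#presumably because the intraday snapshot may not yet carry today's adj_factor.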
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
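#bucket the (already 1-day-lagged) cross-sectional market-cap rank into 20 integer bins, 0-19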
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
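#CloseWithHighLow and HighLowRange are assumed to add rolling min/max position and range
#features over the given lookback windows (8 and 25 days here).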
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
#limit-up / trading-halt flag
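#pct_chg > 9.4 approximates a 10% limit-up; the 4.8-5.2 band presumably catches the 5% limit
#that applies to ST shares.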
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
#1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
#rank the three price ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
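#OldFeaturesRank(...,1) is assumed to append 1-day-lagged copies of the listed (ranked) columns.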
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
#drop low-priced and thinly traded stocks
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#columns not used for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
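#month_sec holds the latest trade_date; only the most recent trading day's rows are kept before export.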
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
dwdw=1
class FE_a29_Volatility(FEbase):
#variant of the multi-day trend-prediction pipeline (label built with PredictDaysTrend below)
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
#df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
#df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
#df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
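#InputChgSum is assumed to aggregate each flow column over trailing 5/12/25-day windows.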
#df_money_all['sm_amount_25_diff']=df_money_all['sm_amount_25']-df_money_all['sm_amount_12']
#df_money_all['sm_amount_12_diff']=df_money_all['sm_amount_12']-df_money_all['sm_amount_5']
#df_money_all['lg_amount_25_diff']=df_money_all['lg_amount_25']-df_money_all['lg_amount_12']
#df_money_all['lg_amount_12_diff']=df_money_all['lg_amount_12']-df_money_all['lg_amount_5']
#df_money_all['net_mf_amount_25_diff']=df_money_all['net_mf_amount_25']-df_money_all['net_mf_amount_12']
#df_money_all['net_mf_amount_12_diff']=df_money_all['net_mf_amount_12']-df_money_all['net_mf_amount_5']
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
#flag ST or otherwise restricted stocks
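#for ordinary shares (10% daily limits) down_limit/up_limit is roughly 0.9/1.1 = 0.82, while for
#ST shares (5% limits) it is about 0.90, so despite its name the ==1 flag presumably marks the
#ordinary, tradable stocks that are kept by the filter further down.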
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
##exclude STAR Market (688) tickers
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#adjusted price (close * adj_factor)
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
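#differences between the 25/12/5-day window features capture how recent extremes compare with
#longer-horizon ones.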
df_all['25_pct_rank_min_diff']=df_all['25_pct_rank_min']-df_all['12_pct_rank_min']
df_all['12_pct_rank_min_diff']=df_all['12_pct_rank_min']-df_all['5_pct_rank_min']
df_all['25_pct_rank_max_diff']=df_all['25_pct_rank_max']-df_all['12_pct_rank_max']
df_all['12_pct_rank_max_diff']=df_all['12_pct_rank_max']-df_all['5_pct_rank_max']
df_all['25_pct_Rangerank_diff']=df_all['25_pct_Rangerank']-df_all['12_pct_Rangerank']
df_all['12_pct_Rangerank_diff']=df_all['12_pct_Rangerank']-df_all['5_pct_Rangerank']
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
#limit-up / trading-halt flag
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
###real price range (distinguish genuinely high- vs. low-priced stocks)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
#1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.PctChgSum(df_all,24)
df_all['chg_rank_24_diff']=df_all['chg_rank_24']-df_all['chg_rank_12']
df_all['chg_rank_12_diff']=df_all['chg_rank_12']-df_all['chg_rank_6']
df_all['chg_rank_6_diff']=df_all['chg_rank_6']-df_all['chg_rank_3']
df_all['pct_chg_24_diff']=df_all['pct_chg_24']-df_all['pct_chg_12']
df_all['pct_chg_12_diff']=df_all['pct_chg_12']-df_all['pct_chg_6']
df_all['pct_chg_6_diff']=df_all['pct_chg_6']-df_all['pct_chg_3']
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
#rank the three price ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
#drop stocks whose price is too low
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>15]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
#columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
#df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
#planned version for the new model (live feature build)
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
#question mark here -- revisit this step
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
df_all['ts_code'] = df_all['ts_code'].astype('str') #convert the original int dtype to string
df_all['ts_code'] = df_all['ts_code'].str.zfill(6) #zero-pad to 6 digits; string methods need the .str accessor
print(df_all)
##exclude STAR Market (688) tickers
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#adjusted price (close * adj_factor)
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
#limit-up / trading-halt flag
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
#1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
#rank the three price ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
#drop low-priced and thinly traded stocks
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#columns not used for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
dwdw=1
class FE_a31(FEbase):
#variant of the multi-day trend-prediction pipeline (label built with PredictDaysTrend below)
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
#df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
#df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
#df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
#df_money_all['sm_amount_25_diff']=df_money_all['sm_amount_25']-df_money_all['sm_amount_12']
#df_money_all['sm_amount_12_diff']=df_money_all['sm_amount_12']-df_money_all['sm_amount_5']
#df_money_all['lg_amount_25_diff']=df_money_all['lg_amount_25']-df_money_all['lg_amount_12']
#df_money_all['lg_amount_12_diff']=df_money_all['lg_amount_12']-df_money_all['lg_amount_5']
#df_money_all['net_mf_amount_25_diff']=df_money_all['net_mf_amount_25']-df_money_all['net_mf_amount_12']
#df_money_all['net_mf_amount_12_diff']=df_money_all['net_mf_amount_12']-df_money_all['net_mf_amount_5']
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
#flag ST or otherwise restricted stocks
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
##exclude STAR Market (688) tickers
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#adjusted price (close * adj_factor)
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all['25_pct_rank_min_diff']=df_all['25_pct_rank_min']-df_all['12_pct_rank_min']
df_all['12_pct_rank_min_diff']=df_all['12_pct_rank_min']-df_all['5_pct_rank_min']
df_all['25_pct_rank_max_diff']=df_all['25_pct_rank_max']-df_all['12_pct_rank_max']
df_all['12_pct_rank_max_diff']=df_all['12_pct_rank_max']-df_all['5_pct_rank_max']
df_all['25_pct_Rangerank_diff']=df_all['25_pct_Rangerank']-df_all['12_pct_Rangerank']
df_all['12_pct_Rangerank_diff']=df_all['12_pct_Rangerank']-df_all['5_pct_Rangerank']
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
#limit-up / trading-halt flag
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
###real price range (distinguish genuinely high- vs. low-priced stocks)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
#1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.PctChgSum(df_all,24)
df_all['chg_rank_24_diff']=df_all['chg_rank_24']-df_all['chg_rank_12']
df_all['chg_rank_12_diff']=df_all['chg_rank_12']-df_all['chg_rank_6']
df_all['chg_rank_6_diff']=df_all['chg_rank_6']-df_all['chg_rank_3']
df_all['pct_chg_24_diff']=df_all['pct_chg_24']-df_all['pct_chg_12']
df_all['pct_chg_12_diff']=df_all['pct_chg_12']-df_all['pct_chg_6']
df_all['pct_chg_6_diff']=df_all['pct_chg_6']-df_all['pct_chg_3']
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
#rank the three price ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
#drop stocks whose price is too low
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
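#keep only the smallest market-cap buckets (bins 0-5 of the 0-19 total_mv_rank scale)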
df_all=df_all[df_all['total_mv_rank']<6]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
#columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
#df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
#planned version for the new model (live feature build)
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
#question mark here -- revisit this step
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
df_all['ts_code'] = df_all['ts_code'].astype('str') #convert the original int dtype to string
df_all['ts_code'] = df_all['ts_code'].str.zfill(6) #zero-pad to 6 digits; string methods need the .str accessor
print(df_all)
##exclude STAR Market (688) tickers
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#adjusted price (close * adj_factor)
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
#limit-up / trading-halt flag
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
#1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
#rank the three price ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
#drop low-priced and thinly traded stocks
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#columns not used for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
dwdw=1
class FE_a31_full(FEbase):
#variant of the multi-day trend-prediction pipeline (label built with PredictDaysTrend below)
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
#df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
#df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
#df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
#df_money_all['sm_amount_25_diff']=df_money_all['sm_amount_25']-df_money_all['sm_amount_12']
#df_money_all['sm_amount_12_diff']=df_money_all['sm_amount_12']-df_money_all['sm_amount_5']
#df_money_all['lg_amount_25_diff']=df_money_all['lg_amount_25']-df_money_all['lg_amount_12']
#df_money_all['lg_amount_12_diff']=df_money_all['lg_amount_12']-df_money_all['lg_amount_5']
#df_money_all['net_mf_amount_25_diff']=df_money_all['net_mf_amount_25']-df_money_all['net_mf_amount_12']
#df_money_all['net_mf_amount_12_diff']=df_money_all['net_mf_amount_12']-df_money_all['net_mf_amount_5']
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
#flag ST or otherwise restricted stocks (disabled in this variant)
#df_all['st_or_otherwrong']=0
#df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
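#in this "full" variant the ST flag stays disabled (limit_percent is computed and then dropped),
#so ST shares and limit-up days flow through to the returned frame.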
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
##exclude STAR Market (688) tickers
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#adjusted price (close * adj_factor)
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all['25_pct_rank_min_diff']=df_all['25_pct_rank_min']-df_all['12_pct_rank_min']
df_all['12_pct_rank_min_diff']=df_all['12_pct_rank_min']-df_all['5_pct_rank_min']
df_all['25_pct_rank_max_diff']=df_all['25_pct_rank_max']-df_all['12_pct_rank_max']
df_all['12_pct_rank_max_diff']=df_all['12_pct_rank_max']-df_all['5_pct_rank_max']
df_all['25_pct_Rangerank_diff']=df_all['25_pct_Rangerank']-df_all['12_pct_Rangerank']
df_all['12_pct_Rangerank_diff']=df_all['12_pct_Rangerank']-df_all['5_pct_Rangerank']
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
#limit-up / trading-halt flag (disabled in this variant)
#df_all['high_stop']=0
#df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
###real price range (distinguish genuinely high- vs. low-priced stocks)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
#1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.PctChgSum(df_all,24)
df_all['chg_rank_24_diff']=df_all['chg_rank_24']-df_all['chg_rank_12']
df_all['chg_rank_12_diff']=df_all['chg_rank_12']-df_all['chg_rank_6']
df_all['chg_rank_6_diff']=df_all['chg_rank_6']-df_all['chg_rank_3']
df_all['pct_chg_24_diff']=df_all['pct_chg_24']-df_all['pct_chg_12']
df_all['pct_chg_12_diff']=df_all['pct_chg_12']-df_all['pct_chg_6']
df_all['pct_chg_6_diff']=df_all['pct_chg_6']-df_all['pct_chg_3']
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
#rank the three price ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
#drop stocks whose price is too low
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
df_all=df_all[df_all['total_mv_rank']<6]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
#columns not used for now
#df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['amount','close','real_price'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
#planned version for the new model (live feature build)
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
#question mark here -- revisit this step
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
df_all['ts_code'] = df_all['ts_code'].astype('str') #convert the original int dtype to string
df_all['ts_code'] = df_all['ts_code'].str.zfill(6) #zero-pad to 6 digits; string methods need the .str accessor
print(df_all)
##exclude STAR Market (688) tickers
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#adjusted price (close * adj_factor)
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
#limit-up / trading-halt flag
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
#1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
#rank the three price ratios (open/high/low vs. pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
#drop low-priced and thinly traded stocks
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#columns not used for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
dwdw=1
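# Usage sketch for the FE_* classes below (hypothetical file names; the interface is
# assumed from the core(DataSetName)/real_FE() methods defined in this module):
#   fe = FE_a29_full()
#   df_train = fe.core(['daily.csv','adj.csv','limit.csv','moneyflow.csv','long.csv'])
#   fe.real_FE()   # rebuilds features for the latest trade date and writes today_train.csv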
class FE_a29_full(FEbase):
#this version switches to a 3-day prediction
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
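#net small-order / large-order money flow = buy amount minus sell amount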
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
#df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
#df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
#df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
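#InputChgSum is assumed to append rolling sums of the named money-flow column over 5/12/25-day windows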
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
#df_money_all['sm_amount_25_diff']=df_money_all['sm_amount_25']-df_money_all['sm_amount_12']
#df_money_all['sm_amount_12_diff']=df_money_all['sm_amount_12']-df_money_all['sm_amount_5']
#df_money_all['lg_amount_25_diff']=df_money_all['lg_amount_25']-df_money_all['lg_amount_12']
#df_money_all['lg_amount_12_diff']=df_money_all['lg_amount_12']-df_money_all['lg_amount_5']
#df_money_all['net_mf_amount_25_diff']=df_money_all['net_mf_amount_25']-df_money_all['net_mf_amount_12']
#df_money_all['net_mf_amount_12_diff']=df_money_all['net_mf_amount_12']-df_money_all['net_mf_amount_5']
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
#flag ST or otherwise abnormal stocks
#df_all['st_or_otherwrong']=0
#df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
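#weekday feature (0=Monday .. 6=Sunday) derived from the yyyymmdd trade_date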
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
##exclude STAR Market (688*) stocks
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
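#class1 buckets stocks by code prefix: 30* ChiNext, 60* Shanghai main board, 00* Shenzhen main board, 0 otherwise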
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#price adjusted by adj_factor
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
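#differences between window ranks show how the position within the high/low range shifts across horizons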
df_all['25_pct_rank_min_diff']=df_all['25_pct_rank_min']-df_all['12_pct_rank_min']
df_all['12_pct_rank_min_diff']=df_all['12_pct_rank_min']-df_all['5_pct_rank_min']
df_all['25_pct_rank_max_diff']=df_all['25_pct_rank_max']-df_all['12_pct_rank_max']
df_all['12_pct_rank_max_diff']=df_all['12_pct_rank_max']-df_all['5_pct_rank_max']
df_all['25_pct_Rangerank_diff']=df_all['25_pct_Rangerank']-df_all['12_pct_Rangerank']
df_all['12_pct_Rangerank_diff']=df_all['12_pct_Rangerank']-df_all['5_pct_Rangerank']
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
#limit-up flag
#df_all['high_stop']=0
#df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
###real price range (separates high- and low-priced stocks)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
#1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.PctChgSum(df_all,24)
df_all['chg_rank_24_diff']=df_all['chg_rank_24']-df_all['chg_rank_12']
df_all['chg_rank_12_diff']=df_all['chg_rank_12']-df_all['chg_rank_6']
df_all['chg_rank_6_diff']=df_all['chg_rank_6']-df_all['chg_rank_3']
df_all['pct_chg_24_diff']=df_all['pct_chg_24']-df_all['pct_chg_12']
df_all['pct_chg_12_diff']=df_all['pct_chg_12']-df_all['pct_chg_6']
df_all['pct_chg_6_diff']=df_all['pct_chg_6']-df_all['pct_chg_3']
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
#rank the three price ratios (open/high/low vs pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
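#PredictDaysTrend is assumed to add the forward-looking label columns (tomorrow_chg / tomorrow_chg_rank) for the given horizon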
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
#drop stocks whose price is too low
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']<6]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
#columns not used for now
#df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['amount','close','real_price'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
#scheduled version for the new model
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
#question mark here: revisit this
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
df_all['ts_code'] = df_all['ts_code'].astype('str') #cast the original int codes to strings
df_all['ts_code'] = df_all['ts_code'].str.zfill(6) #zero-pad to 6 digits; string methods require the .str accessor
print(df_all)
##exclude STAR Market (688*) stocks
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#price adjusted by adj_factor
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
#limit-up flag
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
#1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
#rank the three price ratios (open/high/low vs pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
#drop low-priced / low-value stocks
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#columns not used for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
dwdw=1
class FE_qliba2(FEbase):
#this version switches to a 3-day prediction
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
#===================================================================================================================================#
#price adjusted by adj_factor
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
df_all=FEsingle.PredictDaysTrend(df_all,5)
print(df_all)
df_all=df_all.loc[:,['ts_code','trade_date','tomorrow_chg','tomorrow_chg_rank']]
print(df_all.dtypes)
print(df_all)
#===================================================================================================================================#
#fetch qlib features
###df_qlib_1=pd.read_csv('zzztest.csv',header=0)
###df_qlib_2=pd.read_csv('zzztest2.csv',header=0)
##df_qlib_1=pd.read_csv('2013.csv',header=0)
###df_qlib_1=df_qlib_1.iloc[:,0:70]
##df_qlib_all_l=df_qlib_1.iloc[:,0:2]
##df_qlib_all_r=df_qlib_1.iloc[:,70:]
##df_qlib_1 = pd.concat([df_qlib_all_l,df_qlib_all_r],axis=1)
##print(df_qlib_1.head(10))
##df_qlib_2=pd.read_csv('2015.csv',header=0)
##df_qlib_all_l=df_qlib_2.iloc[:,0:2]
##df_qlib_all_r=df_qlib_2.iloc[:,70:]
##df_qlib_2 = pd.concat([df_qlib_all_l,df_qlib_all_r],axis=1)
##df_qlib_3=pd.read_csv('2017.csv',header=0)
##df_qlib_all_l=df_qlib_3.iloc[:,0:2]
##df_qlib_all_r=df_qlib_3.iloc[:,70:]
##df_qlib_3 = pd.concat([df_qlib_all_l,df_qlib_all_r],axis=1)
##df_qlib_4=pd.read_csv('2019.csv',header=0)
##df_qlib_all_l=df_qlib_4.iloc[:,0:2]
##df_qlib_all_r=df_qlib_4.iloc[:,70:]
##df_qlib_4 = pd.concat([df_qlib_all_l,df_qlib_all_r],axis=1)
##df_qlib_all=pd.concat([df_qlib_2,df_qlib_1])
##df_qlib_all=pd.concat([df_qlib_3,df_qlib_all])
##df_qlib_all=pd.concat([df_qlib_4,df_qlib_all])
##df_qlib_all.drop_duplicates()
##print(df_qlib_all.head(10))
##df_qlib_all.drop(['LABEL0'],axis=1,inplace=True)
##df_qlib_all.to_csv("13to21_first70plus.csv")
df_qlib_all=pd.read_csv('13to21_first70plus.csv',header=0)
#df_qlib_all.drop(['LABEL0'],axis=1,inplace=True)
print(df_qlib_all)
df_qlib_all.rename(columns={'datetime':'trade_date','instrument':'ts_code','score':'mix'}, inplace = True)
print(df_qlib_all.dtypes)
print(df_qlib_all)
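#convert qlib 'YYYY-MM-DD' dates to tushare-style yyyymmdd integers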
df_qlib_all['trade_date'] = pd.to_datetime(df_qlib_all['trade_date'], format='%Y-%m-%d')
df_qlib_all['trade_date']=df_qlib_all['trade_date'].apply(lambda x: x.strftime('%Y%m%d'))
df_qlib_all['trade_date'] = df_qlib_all['trade_date'].astype(int)
df_qlib_all['ts_codeL'] = df_qlib_all['ts_code'].str[:2]
df_qlib_all['ts_codeR'] = df_qlib_all['ts_code'].str[2:]
df_qlib_all['ts_codeR'] = df_qlib_all['ts_codeR'].apply(lambda s: s+'.')
df_qlib_all['ts_code']=df_qlib_all['ts_codeR'].str.cat(df_qlib_all['ts_codeL'])
df_qlib_all.drop(['ts_codeL','ts_codeR'],axis=1,inplace=True)
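#e.g. (assuming qlib instruments look like 'SH600000'): ts_codeL='SH', ts_codeR='600000' -> '600000.SH'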
print(df_qlib_all.dtypes)
print(df_qlib_all)
df_qlib_all=df_qlib_all.fillna(value=0)
df_all=pd.merge(df_all, df_qlib_all, how='left', on=['ts_code','trade_date'])
print(df_all)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
#scheduled version for the new model
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
#question mark here: revisit this
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
df_all['ts_code'] = df_all['ts_code'].astype('str') #cast the original int codes to strings
df_all['ts_code'] = df_all['ts_code'].str.zfill(6) #zero-pad to 6 digits; string methods require the .str accessor
print(df_all)
##exclude STAR Market (688*) stocks
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#price adjusted by adj_factor
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
#limit-up flag
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
#1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
#rank the three price ratios (open/high/low vs pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
#drop low-priced / low-value stocks
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#columns not used for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
dwdw=1
class FEonlinew_a31(FEbase):
#this version switches to a 3-day prediction
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
#ST-or-otherwise flag: regular ±10% names have down_limit/up_limit ≈ 0.82, so a ratio in (0.58, 0.85) marks non-ST stocks as 1
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
##exclude STAR Market (688*) stocks
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#price adjusted by adj_factor
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
#limit-up flag
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
###real price range (separates high- and low-priced stocks)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
#1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.PctChgSum(df_all,24)
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
#rank the three price ratios (open/high/low vs pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
#drop stocks whose price is too low
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>18]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
#columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
#scheduled version for the new model
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all['sm_amount']=df_all.groupby('ts_code')['sm_amount'].shift(1)
df_all['lg_amount']=df_all.groupby('ts_code')['lg_amount'].shift(1)
df_all['net_mf_amount']=df_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=FEsingle.InputChgSum(df_all,5,'sm_amount')
df_all=FEsingle.InputChgSum(df_all,5,'lg_amount')
df_all=FEsingle.InputChgSum(df_all,5,'net_mf_amount')
df_all=FEsingle.InputChgSum(df_all,12,'sm_amount')
df_all=FEsingle.InputChgSum(df_all,12,'lg_amount')
df_all=FEsingle.InputChgSum(df_all,12,'net_mf_amount')
df_all=FEsingle.InputChgSum(df_all,25,'sm_amount')
df_all=FEsingle.InputChgSum(df_all,25,'lg_amount')
df_all=FEsingle.InputChgSum(df_all,25,'net_mf_amount')
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
#question mark here: revisit this
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
df_all['ts_code'] = df_all['ts_code'].astype('str') #cast the original int codes to strings
df_all['ts_code'] = df_all['ts_code'].str.zfill(6) #zero-pad to 6 digits; string methods require the .str accessor
print(df_all)
##exclude STAR Market (688*) stocks
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#price adjusted by adj_factor
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
df_all['25_pct_rank_min_diff']=df_all['25_pct_rank_min']-df_all['12_pct_rank_min']
df_all['12_pct_rank_min_diff']=df_all['12_pct_rank_min']-df_all['5_pct_rank_min']
df_all['25_pct_rank_max_diff']=df_all['25_pct_rank_max']-df_all['12_pct_rank_max']
df_all['12_pct_rank_max_diff']=df_all['12_pct_rank_max']-df_all['5_pct_rank_max']
df_all['25_pct_Rangerank_diff']=df_all['25_pct_Rangerank']-df_all['12_pct_Rangerank']
df_all['12_pct_Rangerank_diff']=df_all['12_pct_Rangerank']-df_all['5_pct_Rangerank']
#limit-up flag
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
#1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.PctChgSum(df_all,24)
df_all['chg_rank_24_diff']=df_all['chg_rank_24']-df_all['chg_rank_12']
df_all['chg_rank_12_diff']=df_all['chg_rank_12']-df_all['chg_rank_6']
df_all['chg_rank_6_diff']=df_all['chg_rank_6']-df_all['chg_rank_3']
df_all['pct_chg_24_diff']=df_all['pct_chg_24']-df_all['pct_chg_12']
df_all['pct_chg_12_diff']=df_all['pct_chg_12']-df_all['pct_chg_6']
df_all['pct_chg_6_diff']=df_all['pct_chg_6']-df_all['pct_chg_3']
#rank the three price ratios (open/high/low vs pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
#drop low-priced / low-value stocks
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
df_all=df_all[df_all['total_mv_rank']<6]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#columns not used for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
dwdw=1
class FEfast_a23(FEbase):
#this version switches to a 3-day prediction
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
#print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
#flag ST or otherwise abnormal stocks
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
##exclude STAR Market (688*) stocks
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#df_all['ts_code_try']=df_all['ts_code'].map(lambda x : x[:-3])
#===================================================================================================================================#
#price adjusted by adj_factor
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
#df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
#df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
#df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
#limit-up flag
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
###real price range (separates high- and low-priced stocks)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
#1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
#df_all=FEsingle.PctChgAbsSumRank(df_all,6)
#df_all=FEsingle.PctChgSumRank(df_all,3)
#df_all=FEsingle.PctChgSumRank(df_all,6)
#df_all=FEsingle.PctChgSumRank(df_all,12)
#df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
#df_all=FEsingle.PctChgSum(df_all,6)
#df_all=FEsingle.PctChgSum(df_all,12)
#df_all=FEsingle.PctChgSum(df_all,24)
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,24)
#rank the three price ratios (open/high/low vs pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*19.9//2
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
#drop stocks whose price is too low
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>18]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
#columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
#scheduled version for the new model
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all= | pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0) | pandas.read_csv |
import CSSS
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression as LR
import copy
from Custom_Functions.error_functions import rmse_pos
##### Davide Modifications:
# I enriched the calcPerformanceMetrics routine with new performance metrics
# I added the possibility to feed which IDs do not have solar and constrain their generation to 0
# I added a function MAPE_mod
class SolarDisagg_IndvHome(CSSS.CSSS):
def __init__(self, netloads, solarregressors, loadregressors, tuningregressors=None, names=None, nosolar_ids = None):
"""
:param netloads: np.array of net loads at each home, with columns corresponding to entries of "names" if
available.
:param solarregressors: np.array of solar regressors (N_s X T)
:param loadregressors: np.array of load regressors (N_l x T)
:param tuningregressors: optional np.array of regressors used for error-variance tuning; defaults to an intercept-only column
:param names: optional list of household identifiers, one per column of netloads
:param nosolar_ids: optional list of names whose solar source is constrained to zero
"""
## Find aggregate net load, and initialize problem.
agg_net_load = np.sum(netloads, axis = 1)
CSSS.CSSS.__init__(self, agg_net_load)
## If no names are input, create names based on id in vector.
self.N, self.M = netloads.shape
if names is None:
self.names = [str(i) for i in np.arange(self.M)]
else:
self.names = names
## Store net loads as a dictionary
self.netloads = {}
for i in range(self.M):
name = self.names[i]
self.netloads[name] = netloads[:,i]
## If no tuning regressors are input, use an intercept only
if tuningregressors is None:
tuningregressors = np.ones((self.N,1))
## Store solar and load regressors, solar regressors, and begin true solar dict
self.solarRegressors = solarregressors
self.loadRegressors = loadregressors
self.tuningRegressors = tuningregressors
self.trueValues = {}
self.nosolar_ids = nosolar_ids
## Cycle through each net load, and create sources.
for source_name in self.names:
self.addSource(regressor=solarregressors, name = source_name, alpha = 1)
## Add constraints that solar generation cannot exceed zero or net load.
self.addConstraint( self.models[source_name]['source'] <= np.array(self.netloads[source_name]) )
self.addConstraint( self.models[source_name]['source'] <= 0 )
self.addConstraint( self.models[source_name]['theta'] >= 0 ) ####################
##################
if self.nosolar_ids is not None:
for source_name in self.nosolar_ids:
self.addConstraint( self.models[source_name]['source'] == 0 )
##################
## Add the aggregate load source
self.addSource(regressor=loadregressors, name = 'AggregateLoad', alpha = 1)
self.addConstraint( self.models['AggregateLoad']['source'] >= 0 )
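# Minimal usage sketch (array shapes and the solve call are assumptions based on the
# parent CSSS class, not verified here):
#   sd = SolarDisagg_IndvHome(netloads_TxM, solar_regressors_TxK, load_regressors_TxJ)
#   sd.constructSolve()   # assumed CSSS entry point that builds and solves the problem
#   solar_home0 = np.array(sd.models[sd.names[0]]['source'].value).squeeze()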
def Solar_var_norm(self):
return(None)
## Placeholder for variance prediction for tuning
def Total_NL_var(self):
return(None)
## Placeholder for variance prediction for tuning
def addTrueValue(self, trueValue, name):
## Function to add true solar for a given model
## Check that true value is correct number of dimensions
trueValue = trueValue.squeeze()
if not (trueValue.shape == (self.N,)):
raise Exception('True value of a solar or load signal must be one dimensional and length N = %d' % self.N)
if name not in (self.names + ['AggregateLoad']):
raise Exception('Must input a valid household identifier or \"AggregateLoad\"')
## Add True Value
self.trueValues[name] = trueValue
return(None)
def calcPerformanceMetrics(self, dropzeros = False):
## Function to calculate performance metrics
        # Dropping zeros is intended to remove nighttime solar.
df = pd.DataFrame()
df['models'] = self.models.keys()
df['rmse'] = np.zeros(df.shape[0]) * np.nan
df['cv'] = np.zeros(df.shape[0]) * np.nan
df['mae'] = np.zeros(df.shape[0]) * np.nan
df['pmae'] = np.zeros(df.shape[0]) * np.nan
df['mbe'] = np.zeros(df.shape[0]) * np.nan
df['mean'] = np.zeros(df.shape[0]) * np.nan
df['MAPE'] = np.zeros(df.shape[0]) * np.nan
df['mae_max']= np.zeros(df.shape[0]) * np.nan
df['cv_max'] = np.zeros(df.shape[0]) * np.nan
df['max_sol_pred'] = np.zeros(df.shape[0]) * np.nan
df['cv_pos'] = np.zeros(df.shape[0]) * np.nan
df['rmse_pos'] = np.zeros(df.shape[0]) * np.nan
df['mae_pos'] = np.zeros(df.shape[0]) * np.nan
df['pmae_pos'] = np.zeros(df.shape[0]) * np.nan
df['mbe_pos'] = np.zeros(df.shape[0]) * np.nan
df['mean_pos'] = np.zeros(df.shape[0]) * np.nan
df['MAPE_pos'] = np.zeros(df.shape[0]) * np.nan
df['mae_pos_max']= np.zeros(df.shape[0]) * np.nan
df['cv_pos_max'] = np.zeros(df.shape[0]) * np.nan
df = df.set_index('models')
for name in self.trueValues.keys():
truth = self.trueValues[name]
est = np.array(self.models[name]['source'].value).squeeze()
## Calculate metrics.
df.loc[name,'mbe'] = np.mean((truth-est))
df.loc[name,'mean'] = np.mean((truth))
df.loc[name,'rmse'] = np.sqrt(np.mean((truth-est)**2))
df.loc[name,'mae'] = np.mean(np.abs((truth-est)))
df.loc[name,'MAPE'] = MAPE_mod(est,truth,thrs=0.001)
df.loc[name,'max_sol_pred'] = np.max(np.abs(est))
df.loc[name,'rmse_pos'] = rmse_pos(est,truth)
if not (df.loc[name,'mean'] == 0):
df.loc[name,'cv'] = df.loc[name,'rmse'] / np.mean(truth)
df.loc[name,'pmae'] = df.loc[name,'mae'] / np.mean(truth)
df.loc[name,'mae_max'] = df.loc[name,'mae']/ np.max(np.abs(truth))
df.loc[name,'cv_max'] = df.loc[name,'rmse'] / np.max(np.abs(truth))
## Find metrics for positive indices only
posinds = np.abs(truth) > (0.05 * np.abs(np.mean(truth)))
truth = truth[posinds]
est = est[posinds]
# df.loc[name,'rmse_pos'] = np.sqrt(np.mean((truth-est)**2))
df.loc[name,'mae_pos'] = np.mean(np.abs((truth-est)))
df.loc[name,'cv_pos'] = df.loc[name,'rmse_pos'] / np.mean(truth)
df.loc[name,'pmae_pos'] = df.loc[name,'mae_pos'] / np.mean(truth)
df.loc[name,'mbe_pos'] = np.mean((truth-est))
df.loc[name,'mean_pos'] = np.mean((truth))
df.loc[name,'MAPE_pos'] = MAPE_mod(est,truth)
df.loc[name,'mae_pos_max'] = df.loc[name,'mae_pos'] / np.max(np.abs(truth))
df.loc[name,'cv_pos_max'] = df.loc[name,'rmse_pos'] / np.max(np.abs(truth))
self.performanceMetrics = df
return(None)
def tuneAlphas_v1(self, tuneSys = None, filter_vec = np.ones(12)/12.0, var_lb_fraction = 0.01):
## Function to autotune alphas given some true solar information.
if tuneSys is None:
## If no name for a tuning system is input, use all systems for which
# a truth is known.
            tuneSys = list(self.trueValues.keys())  # list() so .remove() works under Python 3
if 'AggregateLoad' in tuneSys: tuneSys.remove('AggregateLoad')
## For each system used for tuning, filter the square residuals.
filt_sq_resid_norm = np.ones((self.N,len(tuneSys)))
i=0
for name in tuneSys:
truth = self.trueValues[name].squeeze()
#modelest = self.models[name]['regressor'] * self.models[name]['theta']
#modelest = np.array(modelest.value).squeeze()
## Run a quick regression to collect expected value of
            # the linear model given truth
model = LR()
model.fit(X = self.models[name]['regressor'], y = truth)
modelest = model.predict(X = self.models[name]['regressor'])
## Create a rough capacity estimate from the theta values
capest = np.sum(self.models[name]['theta'].value)
resid_norm = (truth - modelest) / capest
#filt_resid = convolve_cyc( resid, filter_vec )
#sq_resid_demean = ( resid - filt_resid ) ** 2
sq_resid = resid_norm ** 2
filt_sq_resid_norm[:,i] = convolve_cyc( sq_resid , filter_vec )
i=i+1
## Average the filtered squared residuals
ave_filt_sq_resid_norm = np.mean(filt_sq_resid_norm, axis = 1)
## Create alphas for each other PV system
total_sol_var = np.zeros(self.N) ## Instantiate vector for total variance of PV signals,
total_model_est = np.zeros(self.N) ## Instantiate vector for linear model prediction of net load.
## Cycle through each solar model and tune alphas
for name in self.models.keys():
## Model estimated value
model_est = self.models[name]['regressor'] * self.models[name]['theta'] ## model estimate
model_est = np.array(model_est.value).squeeze()
total_model_est = total_model_est + model_est ## total model estimate
## Don't solve for aggregate load yet
if name.lower() == 'aggregateload':
continue
capest = np.sum(self.models[name]['theta'].value) ### Rough capacity estimate
mean_abs_nl = np.mean(np.abs( self.netloads[name] )) ### Mean absolute net load
lb_var = (mean_abs_nl * var_lb_fraction) ** 2 ### Lower bound on variance
sol_var = ave_filt_sq_resid_norm * (capest ** 2) ### Solar variance (unconstrained)
sol_var[sol_var < lb_var ] = lb_var ### Constrain the solar variance
total_sol_var = total_sol_var + sol_var ### Track the total variance of solar
alpha = sol_var ** -1 ## alpha
self.models[name]['alpha'] = alpha
## Tune load alphas.
lb_var = (np.mean(np.abs(self.aggregateSignal)) * var_lb_fraction) ** 2 ## LOWER BOUND OF VARIANCE, 1%
total_residual_sq = (self.aggregateSignal.squeeze() - total_model_est.squeeze()) ** 2 ## Square residuals of aggregate signal prediction
total_var_filt = convolve_cyc(total_residual_sq, filter_vec) ## Filter square residuals as variance estimate
load_var_est = total_var_filt - total_sol_var ## Estimate of load variance
load_var_est[load_var_est < lb_var] = lb_var ## Enforce lower bound on variance
alpha = load_var_est ** -1
self.models['AggregateLoad']['alpha'] = alpha
## Scale all alphas
self.scaleAlphas()
self.updateSourceObj('all')
return(None)
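    ## Note on the alpha tuning above: each source i is weighted in the CSSS
    ## objective by alpha_i(t) = 1 / sigma_i^2(t), i.e. inverse-variance
    ## weighting. The per-home solar variance is estimated by cyclically
    ## filtering the squared, capacity-normalized residuals of a quick linear
    ## fit to the known solar signals, rescaling by the squared capacity
    ## estimate, and flooring it at (var_lb_fraction * mean |net load|)^2.
    ## The load variance is the filtered aggregate squared residual minus the
    ## total solar variance, subject to the same kind of lower bound.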
def fitTuneModels(self, tuneSys = None, var_lb_fraction = 0.05, tuningRegressors = None):
firsttune = True
if tuneSys is None:
## If no name for a tuning system is input, use all systems for which
# a truth is known.
            tuneSys = list(self.trueValues.keys())  # list() so .remove() works under Python 3
if 'AggregateLoad' in tuneSys: tuneSys.remove('AggregateLoad')
## Allow user to place new tuning regressors here.
if tuningRegressors is not None:
self.tuningRegressors = tuningRegressors
## Cycle through each tuning system and collect data for a model.
for name in tuneSys:
truth = self.trueValues[name].squeeze()
## Run a quick regression to collect expected value of
            # the linear model given the true solar signal.
model = LR()
model.fit(X = self.models[name]['regressor'], y = truth)
modelest = model.predict(X = self.models[name]['regressor'])
## Create a rough capacity estimate from the theta values
capest_tune = np.sum(model.coef_)
resid_norm = (truth - modelest) / capest_tune
if firsttune:
firsttune = False
sq_resid_norm = resid_norm ** 2
X = self.tuningRegressors
else:
sq_resid_norm = np.concatenate([ sq_resid_norm , resid_norm ** 2] )
X = np.concatenate([ X, self.tuningRegressors])
# Build model to predict normalized variances
self.Solar_var_norm = LR()
self.Solar_var_norm.fit(y = (sq_resid_norm), X = X)
# Set lower bound for each PV system now
for name, m in self.models.items():
## use the aggregate signal for aggregate load
if name.lower() == 'aggregateload':
mean_abs_nl = np.mean(np.abs(self.aggregateSignal))
else:
mean_abs_nl = np.mean(np.abs(self.netloads[name]))
m['var_lb'] = (mean_abs_nl * var_lb_fraction) ** 2 ### Lower bound on variance
## Build model to predict aggregate net load
model = LR()
X = np.hstack([self.loadRegressors, self.solarRegressors])
model.fit(y = self.aggregateSignal, X = X)
lin_est = model.predict(X = X)
## Collect square residuals and predict them
total_sq_resid = (self.aggregateSignal - lin_est)**2
self.Total_NL_var = LR()
self.Total_NL_var.fit(y = total_sq_resid, X = self.tuningRegressors)
def tuneAlphas(self):
        # Instantiate vectors for the total solar variance and total estimated net load by the model.
total_sol_var = np.zeros(self.N) ## Instantiate vector for total variance of PV signals,
## Cycle through each solar model and tune alphas
pred_sq_resid = self.Solar_var_norm.predict(X = self.tuningRegressors)
for name,m in self.models.items():
## Don't solve for aggregate load yet
if name.lower() == 'aggregateload':
continue
capest = np.sum(m['theta'].value) ### Rough capacity estimate
lb_var = m['var_lb'] ### Lower bound on variance
sol_var = pred_sq_resid * (capest ** 2) ### Solar variance (unconstrained)
sol_var[sol_var < lb_var ] = lb_var ### Constrain the solar variance
total_sol_var = total_sol_var + sol_var ### Track the total variance of solar
alpha = sol_var ** -1 ## alpha
self.models[name]['alpha'] = alpha
## Tune load alphas.
lb_var = self.models['AggregateLoad']['var_lb'] ## LOWER BOUND OF VARIANCE, 1%
        total_var_filt  = self.Total_NL_var.predict(X = self.tuningRegressors)  ## Use linear model to predict total variance
load_var_est = total_var_filt - total_sol_var ## Estimate of load variance
load_var_est[load_var_est < lb_var] = lb_var ## Enforce lower bound on variance
alpha = load_var_est ** -1
self.models['AggregateLoad']['alpha'] = alpha
## Scale all alphas
self.scaleAlphas()
self.updateSourceObj('all')
#return(None)
def scaleAlphas(self, scale_to = 1.0):
## Find the maximum value of alpha
alpha_max = 0
for name, m in self.models.items():
if np.max(m['alpha']) > alpha_max:
alpha_max = np.max(m['alpha'])
## Scale other values of alpha
for name, m in self.models.items():
m['alpha'] = np.array( m['alpha'] / alpha_max * scale_to ).squeeze()
self.updateSourceObj(name)
return(None)
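## ----------------------------------------------------------------------------
## Minimal usage sketch for SolarDisagg_IndvHome (illustrative only). The random
## placeholder data below follow the shapes described in the constructor
## docstring, and the sketch assumes the CSSS base class exposes a
## constructSolve() method, as in the SLAC CSSS package this module builds on.
def _example_solar_disagg(T=96, n_homes=3, seed=0):
    rng_state = np.random.RandomState(seed)
    solar_regs = np.clip(rng_state.randn(T, 2), 0, None)                # e.g. irradiance proxies
    load_regs = np.hstack([np.ones((T, 1)), rng_state.randn(T, 3)])     # intercept plus weather-type regressors
    solar_true = -np.abs(rng_state.randn(T, n_homes))                   # generation is modeled as non-positive
    load_true = np.abs(rng_state.randn(T, n_homes)) + 1.0
    netloads = load_true + solar_true
    sd = SolarDisagg_IndvHome(netloads, solar_regs, load_regs,
                              names=['home%d' % i for i in range(n_homes)])
    sd.constructSolve()                        # assumed CSSS base-class solve call
    sd.addTrueValue(solar_true[:, 0], 'home0')
    sd.calcPerformanceMetrics()                # relies on MAPE_mod / rmse_pos helpers referenced above
    return sd.performanceMetrics
## ----------------------------------------------------------------------------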
class SolarDisagg_IndvHome_Realtime(CSSS.CSSS):
def __init__(self, sdmod, aggregateNetLoad, solarregressors, loadregressors, tuningregressors = None):
## Inputs
# netloads: np.array of net loads at each home, with columns corresponding to entries of "names" if available.
# solarregressors: np.array of solar regressors (N_s X T)
# loadregressors: np.array of load regressors (N_l x T)
CSSS.CSSS.__init__(self, aggregateNetLoad)
self.N = len(aggregateNetLoad)
self.M = sdmod.M
## If no tuning regressors are input, use an intercept only
if tuningregressors is None:
tuningregressors = np.ones((self.N,1))
        ## Store solar, load and tuning regressors, and initialize the dict of true values
self.solarRegressors = solarregressors
self.loadRegressors = loadregressors
self.tuningRegressors = tuningregressors
self.trueValues = {}
## Can I inherit methods?
self.Solar_var_norm = sdmod.Solar_var_norm
self.Total_NL_var = sdmod.Total_NL_var
## Inherit properties from the fitted class
self.names = sdmod.names
## Cycle through each net load, and create sources.
for source_name in sdmod.names:
self.addSource(regressor=solarregressors, name = source_name, alpha = 1)
self.models[source_name]['theta'].value = sdmod.models[source_name]['theta'].value
## Assign Capacity estimates and cutoffs for tuning
self.models[source_name]['var_lb'] = sdmod.models[source_name]['var_lb']
## Add constraints that solar generation cannot exceed zero or net load.
self.addConstraint( self.models[source_name]['source'] <= 0 )
## Add the aggregate load source
self.addSource(regressor=loadregressors, name = 'AggregateLoad', alpha = 1)
self.addConstraint( self.models['AggregateLoad']['source'] > 0 )
self.models['AggregateLoad']['theta'].value = sdmod.models['AggregateLoad']['theta'].value
self.models['AggregateLoad']['var_lb'] = sdmod.models['AggregateLoad']['var_lb']
## FixThetas
self.fixThetas()
self.updateSourceObj('all')
        ## Copy all true values
def tuneAlphas(self):
        # Instantiate vectors for the total solar variance and total estimated net load by the model.
total_sol_var = np.zeros(self.N) ## Instantiate vector for total variance of PV signals,
## Cycle through each solar model and tune alphas
pred_sq_resid = self.Solar_var_norm.predict(X = self.tuningRegressors)
for name,m in self.models.items():
## Don't solve for aggregate load yet
if name.lower() == 'aggregateload':
continue
capest = np.sum(m['theta'] ) ### Rough capacity estimate
lb_var = m['var_lb'] ### Lower bound on variance
sol_var = pred_sq_resid * (capest ** 2) ### Solar variance (unconstrained)
sol_var[sol_var < lb_var ] = lb_var ### Constrain the solar variance
total_sol_var = total_sol_var + sol_var ### Track the total variance of solar
alpha = sol_var ** -1 ## alpha
self.models[name]['alpha'] = alpha
## Tune load alphas.
lb_var = self.models['AggregateLoad']['var_lb'] ## LOWER BOUND OF VARIANCE, 1%
        total_var_filt  = self.Total_NL_var.predict(X = self.tuningRegressors)  ## Use linear model to predict total variance
load_var_est = total_var_filt - total_sol_var ## Estimate of load variance
load_var_est[load_var_est < lb_var] = lb_var ## Enforce lower bound on variance
alpha = load_var_est ** -1
self.models['AggregateLoad']['alpha'] = alpha
## Scale all alphas
self.scaleAlphas()
self.updateSourceObj('all')
#return(None)
def scaleAlphas(self, scale_to = 1.0):
## Find the maximum value of alpha
alpha_max = 0
for name, m in self.models.items():
if np.max(m['alpha']) > alpha_max:
alpha_max = np.max(m['alpha'])
## Scale other values of alpha
for name, m in self.models.items():
m['alpha'] = np.array( m['alpha'] / alpha_max * scale_to ).squeeze()
self.updateSourceObj(name)
return(None)
def addTrueValue(self, trueValue, name):
## Function to add true solar for a given model
## Check that true value is correct number of dimensions
trueValue = trueValue.squeeze()
if not (trueValue.shape == (self.N,)):
raise Exception('True value of a solar or load signal must be one dimensional and length N = %d' % self.N)
if name not in (self.names + ['AggregateLoad']):
raise Exception('Must input a valid household identifier or \"AggregateLoad\"')
## Add True Value
self.trueValues[name] = trueValue
return(None)
def calcPerformanceMetrics(self, dropzeros = False, MAPE = False):
## Function to calculate performance metrics
        # Dropping zeros is intended to remove nighttime solar.
        df = pd.DataFrame()
import numpy as np
import pandas as pd
from faker import Faker
import random
import datetime
import sys
import xlwt
#import xlrd
import openpyxl
folder_path = sys.path[0]
input_path = folder_path + '\\employees.xlsx' # Name of the input file
# A number of different seed functions must be set to produce a controlled and reproducible output
fake = Faker()
Faker.seed(1)
seed = 7
random.seed(10)
np.random.seed(seed=5)
employees_df = pd.read_excel(input_path, sheet_name='Professional_Profile', engine='openpyxl')
evaluation_performance = {'1': 'Low', '2': 'Medium', '3': 'High'} # Dictionary that will be used for evaluation
# ----------------------- Working with the HR department -------------------------------------------------------------#
# We only extract the useful information for our department to execute calculations faster
department_df = employees_df[employees_df['Department'] == 'HR'].reset_index()[['ID', 'Date Hired', 'Time Left',
'Salary', 'Working Experience', 'Recruiter ID']]
all_evaluations = [] # Empty list to append the annual evaluations of the department employees
for i in range(len(department_df)):
evaluation = {}
evaluation['ID'] = department_df.at[i, 'ID']
time_in_company = 2020 - department_df.at[i, 'Time Left'] - int(department_df.at[i, 'Date Hired'][0:4])
for year in range(min(5, time_in_company)):
calendar_year = 2020 - department_df.at[i, 'Time Left'] - year
evaluation['Year'] = calendar_year # Calendar year of the specific evaluation record
evaluation['Loyalty'] = calendar_year - int(department_df.at[i, 'Date Hired'][0:4]) # Employee Loyalty
evaluation['Number of Promotions'] = int(evaluation['Loyalty']/4) # Number of promotions of the employee
evaluation['Bonus'] = int(np.random.uniform(0, 30)/100*int(department_df.at[i, 'Salary'])) # Annual Bonus
evaluation['Overtime'] = int(np.random.uniform(0, 20) / 100 * 1816) # Annual working hours are 1816
evaluation['Chargeability'] = int(np.random.uniform(0, 100))
percentile = np.random.uniform(0, 100) # Randomly estimate the percentile of the employee within the department
if percentile < 15:
evaluation['Department Percentile'] = 'Bottom 15%'
evaluation['Performance'] = 'Low'
elif percentile > 85:
evaluation['Department Percentile'] = 'Top 15%'
evaluation['Performance'] = 'High'
else:
evaluation['Department Percentile'] = 'Mid 70%'
evaluation['Performance'] = evaluation_performance[str(int(np.random.uniform(1, 3)))]
# HR specific evaluation metrics
# Calculating all employees hired by the specific employee
hired_employees_df = employees_df[
(((employees_df['Recruiter ID'] == department_df.at[i, 'ID']) &
(pd.to_datetime(employees_df['Date Hired'], format='%Y-%m-%d') <=
datetime.datetime.strptime(str(calendar_year), '%Y'))))].reset_index()[['ID', 'Date Hired', 'Time Left']]
hired_employees_df['Time in Company'] = 0
# Calculating the exact time that each of the recruited employees worked for the company
for j in hired_employees_df.index:
hired_employees_df.at[j, 'Time in Company'] = 2020 - hired_employees_df.at[j, 'Time Left'] - \
int(hired_employees_df.at[j, 'Date Hired'][0:4])
evaluation['Total Time of hired employees(years)'] = hired_employees_df['Time in Company'].sum() # Total employee time
evaluation['Average Recruitment Time(months)'] = float("{:.2f}".format(np.random.uniform(1, 12))) # Average recruitment time
active_recruits = hired_employees_df[hired_employees_df['Time Left'] == 0]['Time Left'].count() #How many recruits are still working in the company
evaluation['Employees Fired'] = int(0.2*(len(hired_employees_df) - active_recruits)) # 20% of the recruits that left are considered fired
all_evaluations.append(evaluation.copy())
hr_df = pd.DataFrame(all_evaluations)
with pd.ExcelWriter(input_path, engine='openpyxl', mode='a') as writer:
hr_df.to_excel(writer, index=False, sheet_name='HR')
writer.save()
writer.close()
# ------------------------------------------- HR FINISHED --------------------------------------------------------------
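# The Sales/Product/Finance/Legal/Strategy blocks below repeat the same common
# evaluation fields computed for HR above. A sketch of how that shared logic
# could be factored into one helper (illustrative only; the script keeps the
# explicit per-department loops):
def build_common_evaluation(row, calendar_year):
    """Return the evaluation fields shared by every department for one year."""
    evaluation = {
        'ID': row['ID'],
        'Year': calendar_year,
        'Loyalty': calendar_year - int(row['Date Hired'][0:4]),
        'Bonus': int(np.random.uniform(0, 30) / 100 * int(row['Salary'])),
        'Overtime': int(np.random.uniform(0, 20) / 100 * 1816),   # annual working hours are 1816
        'Chargeability': int(np.random.uniform(0, 100)),
    }
    evaluation['Number of Promotions'] = int(evaluation['Loyalty'] / 4)
    percentile = np.random.uniform(0, 100)
    if percentile < 15:
        evaluation['Department Percentile'], evaluation['Performance'] = 'Bottom 15%', 'Low'
    elif percentile > 85:
        evaluation['Department Percentile'], evaluation['Performance'] = 'Top 15%', 'High'
    else:
        evaluation['Department Percentile'] = 'Mid 70%'
        evaluation['Performance'] = evaluation_performance[str(int(np.random.uniform(1, 3)))]
    return evaluation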
# ----------------------- Working with the Sales department ------------------------------------------------------------
# We only extract the useful information for our department to execute calculations faster
department_df = []
department_df = employees_df[employees_df['Department'] == 'Sales'].reset_index()[['ID', 'Date Hired', 'Time Left',
'Salary', 'Working Experience', 'Recruiter ID']]
all_evaluations = [] # Empty list to append the annual evaluations of the department employees
for i in range(len(department_df)):
evaluation = {}
evaluation['ID'] = department_df.at[i, 'ID']
time_in_company = 2020 - department_df.at[i, 'Time Left'] - int(department_df.at[i, 'Date Hired'][0:4])
for year in range(min(5, time_in_company)):
calendar_year = 2020 - department_df.at[i, 'Time Left'] - year
evaluation['Year'] = calendar_year # Calendar year of the specific evaluation record
evaluation['Loyalty'] = calendar_year - int(department_df.at[i, 'Date Hired'][0:4]) # Employee Loyalty
evaluation['Number of Promotions'] = int(evaluation['Loyalty']/4) # Number of promotions of the employee
evaluation['Bonus'] = int(np.random.uniform(0, 30)/100*int(department_df.at[i, 'Salary'])) # Annual Bonus
evaluation['Overtime'] = int(np.random.uniform(0, 20) / 100 * 1816) # Annual working hours are 1816
evaluation['Chargeability'] = int(np.random.uniform(0, 100))
percentile = np.random.uniform(0, 100) # Randomly estimate the percentile of the employee within the department
if percentile < 15:
evaluation['Department Percentile'] = 'Bottom 15%'
evaluation['Performance'] = 'Low'
elif percentile > 85:
evaluation['Department Percentile'] = 'Top 15%'
evaluation['Performance'] = 'High'
else:
evaluation['Department Percentile'] = 'Mid 70%'
evaluation['Performance'] = evaluation_performance[str(int(np.random.uniform(1, 3)))]
# Sales specific evaluation metrics
evaluation['Total Sales'] = int(np.random.uniform(1000, 100000))
evaluation['Clients Asking'] = int(np.random.uniform(0, 5))
all_evaluations.append(evaluation.copy())
sales_df = pd.DataFrame(all_evaluations)
with pd.ExcelWriter(input_path, engine='openpyxl', mode='a') as writer:
sales_df.to_excel(writer, index=False, sheet_name='Sales')
writer.save()
writer.close()
# ------------------------------------------- Sales FINISHED -----------------------------------------------------------
# ----------------------- Working with the Product department ---------------------------------------------------------#
# We only extract the useful information for our department to execute calculations faster
department_df = []
department_df = employees_df[employees_df['Department'] == 'Product'].reset_index()[['ID', 'Date Hired', 'Time Left',
'Salary', 'Working Experience', 'Recruiter ID']]
all_evaluations = [] # Empty list to append the annual evaluations of the department employees
for i in range(len(department_df)):
evaluation = {}
evaluation['ID'] = department_df.at[i, 'ID']
time_in_company = 2020 - department_df.at[i, 'Time Left'] - int(department_df.at[i, 'Date Hired'][0:4])
for year in range(min(5, time_in_company)):
calendar_year = 2020 - department_df.at[i, 'Time Left'] - year
evaluation['Year'] = calendar_year # Calendar year of the specific evaluation record
evaluation['Loyalty'] = calendar_year - int(department_df.at[i, 'Date Hired'][0:4]) # Employee Loyalty
evaluation['Number of Promotions'] = int(evaluation['Loyalty']/4) # Number of promotions of the employee
evaluation['Bonus'] = int(np.random.uniform(0, 30)/100*int(department_df.at[i, 'Salary'])) # Annual Bonus
evaluation['Overtime'] = int(np.random.uniform(0, 20) / 100 * 1816) # Annual working hours are 1816
evaluation['Chargeability'] = int(np.random.uniform(0, 100))
percentile = np.random.uniform(0, 100) # Randomly estimate the percentile of the employee within the department
if percentile < 15:
evaluation['Department Percentile'] = 'Bottom 15%'
evaluation['Performance'] = 'Low'
elif percentile > 85:
evaluation['Department Percentile'] = 'Top 15%'
evaluation['Performance'] = 'High'
else:
evaluation['Department Percentile'] = 'Mid 70%'
evaluation['Performance'] = evaluation_performance[str(int(np.random.uniform(1, 3)))]
# Product specific evaluation metrics
evaluation['Total Defects'] = int(np.random.uniform(10, 50))
evaluation['Number of Complaining Customers'] = int(np.random.uniform(0, 20))
all_evaluations.append(evaluation.copy())
product_df = pd.DataFrame(all_evaluations)
with pd.ExcelWriter(input_path, engine='openpyxl', mode='a') as writer:
product_df.to_excel(writer, index=False, sheet_name='Product')
writer.save()
writer.close()
# ------------------------------------------- Product FINISHED ---------------------------------------------------------
# ----------------------- Working with the Finance department ---------------------------------------------------------#
# We only extract the useful information for our department to execute calculations faster
department_df = []
department_df = employees_df[employees_df['Department'] == 'Finance'].reset_index()[['ID', 'Date Hired', 'Time Left',
'Salary', 'Working Experience', 'Recruiter ID']]
all_evaluations = [] # Empty list to append the annual evaluations of the department employees
for i in range(len(department_df)):
evaluation = {}
evaluation['ID'] = department_df.at[i, 'ID']
time_in_company = 2020 - department_df.at[i, 'Time Left'] - int(department_df.at[i, 'Date Hired'][0:4])
for year in range(min(5, time_in_company)):
calendar_year = 2020 - department_df.at[i, 'Time Left'] - year
evaluation['Year'] = calendar_year # Calendar year of the specific evaluation record
evaluation['Loyalty'] = calendar_year - int(department_df.at[i, 'Date Hired'][0:4]) # Employee Loyalty
evaluation['Number of Promotions'] = int(evaluation['Loyalty']/4) # Number of promotions of the employee
evaluation['Bonus'] = int(np.random.uniform(0, 30)/100*int(department_df.at[i, 'Salary'])) # Annual Bonus
evaluation['Overtime'] = int(np.random.uniform(0, 20) / 100 * 1816) # Annual working hours are 1816
evaluation['Chargeability'] = int(np.random.uniform(0, 100))
percentile = np.random.uniform(0, 100) # Randomly estimate the percentile of the employee within the department
if percentile < 15:
evaluation['Department Percentile'] = 'Bottom 15%'
evaluation['Performance'] = 'Low'
elif percentile > 85:
evaluation['Department Percentile'] = 'Top 15%'
evaluation['Performance'] = 'High'
else:
evaluation['Department Percentile'] = 'Mid 70%'
evaluation['Performance'] = evaluation_performance[str(int(np.random.uniform(1, 3)))]
# Finance specific evaluation metrics
        evaluation['Non-Servicing Obligations'] = int(np.random.uniform(0, 10000))
all_evaluations.append(evaluation.copy())
finance_df = pd.DataFrame(all_evaluations)
with pd.ExcelWriter(input_path, engine='openpyxl', mode='a') as writer:
finance_df.to_excel(writer, index=False, sheet_name='Finance')
writer.save()
writer.close()
# ------------------------------------------- Finance FINISHED ---------------------------------------------------------
# ----------------------- Working with the Legal department ---------------------------------------------------------#
# We only extract the useful information for our department to execute calculations faster
department_df = []
department_df = employees_df[employees_df['Department'] == 'Legal'].reset_index()[['ID', 'Date Hired', 'Time Left',
'Salary', 'Working Experience', 'Recruiter ID']]
all_evaluations = [] # Empty list to append the annual evaluations of the department employees
for i in range(len(department_df)):
evaluation = {}
evaluation['ID'] = department_df.at[i, 'ID']
time_in_company = 2020 - department_df.at[i, 'Time Left'] - int(department_df.at[i, 'Date Hired'][0:4])
for year in range(min(5, time_in_company)):
calendar_year = 2020 - department_df.at[i, 'Time Left'] - year
evaluation['Year'] = calendar_year # Calendar year of the specific evaluation record
evaluation['Loyalty'] = calendar_year - int(department_df.at[i, 'Date Hired'][0:4]) # Employee Loyalty
evaluation['Number of Promotions'] = int(evaluation['Loyalty']/4) # Number of promotions of the employee
evaluation['Bonus'] = int(np.random.uniform(0, 30)/100*int(department_df.at[i, 'Salary'])) # Annual Bonus
evaluation['Overtime'] = int(np.random.uniform(0, 20) / 100 * 1816) # Annual working hours are 1816
evaluation['Chargeability'] = int(np.random.uniform(0, 100))
percentile = np.random.uniform(0, 100) # Randomly estimate the percentile of the employee within the department
if percentile < 15:
evaluation['Department Percentile'] = 'Bottom 15%'
evaluation['Performance'] = 'Low'
elif percentile > 85:
evaluation['Department Percentile'] = 'Top 15%'
evaluation['Performance'] = 'High'
else:
evaluation['Department Percentile'] = 'Mid 70%'
evaluation['Performance'] = evaluation_performance[str(int(np.random.uniform(1, 3)))]
# Legal specific evaluation metrics
evaluation['Successful Lawsuits'] = int(np.random.uniform(0, 3))
evaluation['Disputes amicably resolved'] = int(np.random.uniform(0, 6))
all_evaluations.append(evaluation.copy())
legal_df = pd.DataFrame(all_evaluations)
with pd.ExcelWriter(input_path, engine='openpyxl', mode='a') as writer:
legal_df.to_excel(writer, index=False, sheet_name='Legal')
writer.save()
writer.close()
# ------------------------------------------- Legal FINISHED ---------------------------------------------------------
# ----------------------- Working with the Strategy department --------------------------------------------------------#
# We only extract the useful information for our department to execute calculations faster
department_df = []
department_df = employees_df[employees_df['Department'] == 'Strategy'].reset_index()[['ID', 'Date Hired', 'Time Left',
'Salary', 'Working Experience', 'Recruiter ID']]
all_evaluations = [] # Empty list to append the annual evaluations of the department employees
for i in range(len(department_df)):
evaluation = {}
evaluation['ID'] = department_df.at[i, 'ID']
time_in_company = 2020 - department_df.at[i, 'Time Left'] - int(department_df.at[i, 'Date Hired'][0:4])
for year in range(min(5, time_in_company)):
calendar_year = 2020 - department_df.at[i, 'Time Left'] - year
evaluation['Year'] = calendar_year # Calendar year of the specific evaluation record
evaluation['Loyalty'] = calendar_year - int(department_df.at[i, 'Date Hired'][0:4]) # Employee Loyalty
evaluation['Number of Promotions'] = int(evaluation['Loyalty']/4) # Number of promotions of the employee
evaluation['Bonus'] = int(np.random.uniform(0, 30)/100*int(department_df.at[i, 'Salary'])) # Annual Bonus
evaluation['Overtime'] = int(np.random.uniform(0, 20) / 100 * 1816) # Annual working hours are 1816
evaluation['Chargeability'] = int(np.random.uniform(0, 100))
percentile = np.random.uniform(0, 100) # Randomly estimate the percentile of the employee within the department
if percentile < 15:
evaluation['Department Percentile'] = 'Bottom 15%'
evaluation['Performance'] = 'Low'
elif percentile > 85:
evaluation['Department Percentile'] = 'Top 15%'
evaluation['Performance'] = 'High'
else:
evaluation['Department Percentile'] = 'Mid 70%'
evaluation['Performance'] = evaluation_performance[str(int(np.random.uniform(1, 3)))]
# Strategy specific evaluation metrics
evaluation['Total Sales'] = int(np.random.uniform(1000, 10000))
evaluation['Number of Teams'] = int(np.random.uniform(1, 10))
evaluation['Number of Projects'] = int(np.random.uniform(1, 20))
all_evaluations.append(evaluation.copy())
strategy_df = pd.DataFrame(all_evaluations)
with pd.ExcelWriter(input_path, engine='openpyxl', mode='a') as writer:
    strategy_df.to_excel(writer, index=False, sheet_name='Strategy')
    writer.save()
    writer.close()
from collections import namedtuple
import pandas as pd
from sklearn.datasets import load_iris
from sklearn.neighbors import KNeighborsClassifier
test_data = {
"mean radius": 10.80,
"mean texture": 21.98,
"mean perimeter": 68.79,
"mean area": 359.9,
"mean smoothness": 0.08801,
"mean compactness": 0.05743,
"mean concavity": 0.03614,
"mean concave points": 0.2016,
"mean symmetry": 0.05977,
"mean fractal dimension": 0.3077,
"radius error": 1.621,
"texture error": 2.240,
"perimeter error": 20.20,
"area error": 20.02,
"smoothness error": 0.006543,
"compactness error": 0.02148,
"concavity error": 0.02991,
"concave points error": 0.01045,
"symmetry error": 0.01844,
"fractal dimension error": 0.002690,
"worst radius": 12.76,
"worst texture": 32.04,
"worst perimeter": 83.69,
"worst area": 489.5,
"worst smoothness": 0.1303,
"worst compactness": 0.1696,
"worst concavity": 0.1927,
"worst concave points": 0.07485,
"worst symmetry": 0.2965,
"worst fractal dimension": 0.07662,
}
test_df = pd.DataFrame([test_data])
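# The feature names above match scikit-learn's breast cancer dataset rather than
# iris, so the sketch below (a hypothetical completion; the classifier choice and
# n_neighbors value are assumptions) trains on that dataset and scores the row:
def _example_predict(test_frame):
    from sklearn.datasets import load_breast_cancer
    data = load_breast_cancer()
    X_train = pd.DataFrame(data.data, columns=data.feature_names)
    clf = KNeighborsClassifier(n_neighbors=5).fit(X_train, data.target)
    # align the column order with the training frame before predicting
    return clf.predict(test_frame[list(data.feature_names)])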
# -*- coding: utf-8 -*-
"""
This script selects the subjects with good data quality (mean FD, percentage of large FD values, rigid-body motion),
then matches SZ and HC groups on age, sex and head motion.
Note that these 1322 subjects were already screened with a rigid-motion criterion of one voxel.
The IDs of all selected subjects are saved to D:/WorkStation_2018/WorkStation_CNN_Schizo/Scale/selected_sub.xlsx
"""
import sys
sys.path.append(r'D:\My_Codes\LC_Machine_Learning\lc_rsfmri_tools\lc_rsfmri_tools_python\Statistics')
import pandas as pd
import numpy as np
from lc_chisqure import lc_chisqure
import scipy.stats as stats
import matplotlib.pyplot as plt
# Inputs
scales_whole = r'D:\WorkStation_2018\WorkStation_CNN_Schizo\Scale\10-24大表.xlsx'
headmotionfile = r'D:\WorkStation_2018\WorkStation_CNN_Schizo\Scale\头动参数_1322.xlsx'
uidfile = r'D:\WorkStation_2018\WorkStation_CNN_Schizo\Scale\ID_1322.txt'
scale_206 = r'D:\WorkStation_2018\WorkStation_CNN_Schizo\Scale\SZ_NC_108_100.xlsx'
# Load
scales_whole = pd.read_excel(scales_whole)
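# A sketch of the quality-based selection described in the docstring. The helper
# name, the column labels ('mean FD', 'percent FD>0.5') and the thresholds are
# hypothetical placeholders; the real head-motion spreadsheet may use other labels.
def _select_good_quality(headmotion_df, mean_fd_max=0.2, pct_large_fd_max=0.2):
    good = (headmotion_df['mean FD'] < mean_fd_max) & \
           (headmotion_df['percent FD>0.5'] < pct_large_fd_max)
    return headmotion_df.loc[good]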
# pylint: disable-msg=E1101,W0612
from datetime import datetime, time, timedelta, date
import sys
import os
import operator
from distutils.version import LooseVersion
import nose
import numpy as np
randn = np.random.randn
from pandas import (Index, Series, TimeSeries, DataFrame,
isnull, date_range, Timestamp, Period, DatetimeIndex,
Int64Index, to_datetime, bdate_range, Float64Index)
import pandas.core.datetools as datetools
import pandas.tseries.offsets as offsets
import pandas.tseries.tools as tools
import pandas.tseries.frequencies as fmod
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tslib import NaT, iNaT
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.index as _index
from pandas.compat import range, long, StringIO, lrange, lmap, zip, product
import pandas.core.datetools as dt
from numpy.random import rand
from numpy.testing import assert_array_equal
from pandas.util.testing import assert_frame_equal
import pandas.compat as compat
import pandas.core.common as com
from pandas import concat
from pandas import _np_version_under1p7
from numpy.testing.decorators import slow
def _skip_if_no_pytz():
try:
import pytz
except ImportError:
raise nose.SkipTest("pytz not installed")
def _skip_if_has_locale():
import locale
lang, _ = locale.getlocale()
if lang is not None:
raise nose.SkipTest("Specific locale is set {0}".format(lang))
class TestTimeSeriesDuplicates(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
dates = [datetime(2000, 1, 2), datetime(2000, 1, 2),
datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 3), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 4),
datetime(2000, 1, 4), datetime(2000, 1, 5)]
self.dups = Series(np.random.randn(len(dates)), index=dates)
def test_constructor(self):
tm.assert_isinstance(self.dups, TimeSeries)
tm.assert_isinstance(self.dups.index, DatetimeIndex)
def test_is_unique_monotonic(self):
self.assertFalse(self.dups.index.is_unique)
def test_index_unique(self):
uniques = self.dups.index.unique()
expected = DatetimeIndex([datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 5)])
self.assertEqual(uniques.dtype, 'M8[ns]') # sanity
self.assertTrue(uniques.equals(expected))
self.assertEqual(self.dups.index.nunique(), 4)
# #2563
self.assertTrue(isinstance(uniques, DatetimeIndex))
dups_local = self.dups.index.tz_localize('US/Eastern')
dups_local.name = 'foo'
result = dups_local.unique()
expected = DatetimeIndex(expected, tz='US/Eastern')
self.assertTrue(result.tz is not None)
self.assertEqual(result.name, 'foo')
self.assertTrue(result.equals(expected))
# NaT
arr = [ 1370745748 + t for t in range(20) ] + [iNaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
arr = [ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
def test_index_dupes_contains(self):
d = datetime(2011, 12, 5, 20, 30)
ix = DatetimeIndex([d, d])
self.assertTrue(d in ix)
def test_duplicate_dates_indexing(self):
ts = self.dups
uniques = ts.index.unique()
for date in uniques:
result = ts[date]
mask = ts.index == date
total = (ts.index == date).sum()
expected = ts[mask]
if total > 1:
assert_series_equal(result, expected)
else:
assert_almost_equal(result, expected[0])
cp = ts.copy()
cp[date] = 0
expected = Series(np.where(mask, 0, ts), index=ts.index)
assert_series_equal(cp, expected)
self.assertRaises(KeyError, ts.__getitem__, datetime(2000, 1, 6))
# new index
ts[datetime(2000,1,6)] = 0
self.assertEqual(ts[datetime(2000,1,6)], 0)
def test_range_slice(self):
idx = DatetimeIndex(['1/1/2000', '1/2/2000', '1/2/2000', '1/3/2000',
'1/4/2000'])
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts['1/2/2000':]
expected = ts[1:]
assert_series_equal(result, expected)
result = ts['1/2/2000':'1/3/2000']
expected = ts[1:4]
assert_series_equal(result, expected)
def test_groupby_average_dup_values(self):
result = self.dups.groupby(level=0).mean()
expected = self.dups.groupby(self.dups.index).mean()
assert_series_equal(result, expected)
def test_indexing_over_size_cutoff(self):
import datetime
# #1821
old_cutoff = _index._SIZE_CUTOFF
try:
_index._SIZE_CUTOFF = 1000
# create large list of non periodic datetime
dates = []
sec = datetime.timedelta(seconds=1)
half_sec = datetime.timedelta(microseconds=500000)
d = datetime.datetime(2011, 12, 5, 20, 30)
n = 1100
for i in range(n):
dates.append(d)
dates.append(d + sec)
dates.append(d + sec + half_sec)
dates.append(d + sec + sec + half_sec)
d += 3 * sec
# duplicate some values in the list
duplicate_positions = np.random.randint(0, len(dates) - 1, 20)
for p in duplicate_positions:
dates[p + 1] = dates[p]
df = DataFrame(np.random.randn(len(dates), 4),
index=dates,
columns=list('ABCD'))
pos = n * 3
timestamp = df.index[pos]
self.assertIn(timestamp, df.index)
# it works!
df.ix[timestamp]
self.assertTrue(len(df.ix[[timestamp]]) > 0)
finally:
_index._SIZE_CUTOFF = old_cutoff
def test_indexing_unordered(self):
# GH 2437
rng = date_range(start='2011-01-01', end='2011-01-15')
ts = Series(randn(len(rng)), index=rng)
ts2 = concat([ts[0:4],ts[-4:],ts[4:-4]])
for t in ts.index:
s = str(t)
expected = ts[t]
result = ts2[t]
self.assertTrue(expected == result)
# GH 3448 (ranges)
def compare(slobj):
result = ts2[slobj].copy()
result = result.sort_index()
expected = ts[slobj]
assert_series_equal(result,expected)
compare(slice('2011-01-01','2011-01-15'))
compare(slice('2010-12-30','2011-01-15'))
compare(slice('2011-01-01','2011-01-16'))
# partial ranges
compare(slice('2011-01-01','2011-01-6'))
compare(slice('2011-01-06','2011-01-8'))
compare(slice('2011-01-06','2011-01-12'))
# single values
result = ts2['2011'].sort_index()
expected = ts['2011']
assert_series_equal(result,expected)
# diff freq
rng = date_range(datetime(2005, 1, 1), periods=20, freq='M')
ts = Series(np.arange(len(rng)), index=rng)
ts = ts.take(np.random.permutation(20))
result = ts['2005']
for t in result.index:
self.assertTrue(t.year == 2005)
def test_indexing(self):
idx = date_range("2001-1-1", periods=20, freq='M')
ts = Series(np.random.rand(len(idx)),index=idx)
# getting
# GH 3070, make sure semantics work on Series/Frame
expected = ts['2001']
df = DataFrame(dict(A = ts))
result = df['2001']['A']
assert_series_equal(expected,result)
# setting
ts['2001'] = 1
expected = ts['2001']
df.loc['2001','A'] = 1
result = df['2001']['A']
assert_series_equal(expected,result)
# GH3546 (not including times on the last day)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:00', freq='H')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:59', freq='S')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = [ Timestamp('2013-05-31 00:00'), Timestamp(datetime(2013,5,31,23,59,59,999999))]
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013']
assert_series_equal(expected,ts)
# GH 3925, indexing with a seconds resolution string / datetime object
df = DataFrame(randn(5,5),columns=['open','high','low','close','volume'],index=date_range('2012-01-02 18:01:00',periods=5,tz='US/Central',freq='s'))
expected = df.loc[[df.index[2]]]
result = df['2012-01-02 18:01:02']
assert_frame_equal(result,expected)
# this is a single date, so will raise
self.assertRaises(KeyError, df.__getitem__, df.index[2],)
def test_recreate_from_data(self):
if _np_version_under1p7:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N', 'C']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, periods=1)
idx = DatetimeIndex(org, freq=f)
self.assertTrue(idx.equals(org))
        # unable to create tz-aware 'A' and 'C' freq
if _np_version_under1p7:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, tz='US/Pacific', periods=1)
idx = DatetimeIndex(org, freq=f, tz='US/Pacific')
self.assertTrue(idx.equals(org))
def assert_range_equal(left, right):
assert(left.equals(right))
assert(left.freq == right.freq)
assert(left.tz == right.tz)
class TestTimeSeries(tm.TestCase):
_multiprocess_can_split_ = True
def test_is_(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
self.assertTrue(dti.is_(dti))
self.assertTrue(dti.is_(dti.view()))
self.assertFalse(dti.is_(dti.copy()))
def test_dti_slicing(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
dti2 = dti[[1, 3, 5]]
v1 = dti2[0]
v2 = dti2[1]
v3 = dti2[2]
self.assertEqual(v1, Timestamp('2/28/2005'))
self.assertEqual(v2, Timestamp('4/30/2005'))
self.assertEqual(v3, Timestamp('6/30/2005'))
# don't carry freq through irregular slicing
self.assertIsNone(dti2.freq)
def test_pass_datetimeindex_to_index(self):
# Bugs in #1396
rng = date_range('1/1/2000', '3/1/2000')
idx = Index(rng, dtype=object)
expected = Index(rng.to_pydatetime(), dtype=object)
self.assert_numpy_array_equal(idx.values, expected.values)
def test_contiguous_boolean_preserve_freq(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
mask = np.zeros(len(rng), dtype=bool)
mask[10:20] = True
masked = rng[mask]
expected = rng[10:20]
self.assertIsNotNone(expected.freq)
assert_range_equal(masked, expected)
mask[22] = True
masked = rng[mask]
self.assertIsNone(masked.freq)
def test_getitem_median_slice_bug(self):
index = date_range('20090415', '20090519', freq='2B')
s = Series(np.random.randn(13), index=index)
indexer = [slice(6, 7, None)]
result = s[indexer]
expected = s[indexer[0]]
assert_series_equal(result, expected)
def test_series_box_timestamp(self):
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng)
tm.assert_isinstance(s[5], Timestamp)
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng, index=rng)
tm.assert_isinstance(s[5], Timestamp)
tm.assert_isinstance(s.iget_value(5), Timestamp)
def test_date_range_ambiguous_arguments(self):
# #2538
start = datetime(2011, 1, 1, 5, 3, 40)
end = datetime(2011, 1, 1, 8, 9, 40)
self.assertRaises(ValueError, date_range, start, end,
freq='s', periods=10)
def test_timestamp_to_datetime(self):
_skip_if_no_pytz()
rng = date_range('20090415', '20090519',
tz='US/Eastern')
stamp = rng[0]
dtval = stamp.to_pydatetime()
self.assertEqual(stamp, dtval)
self.assertEqual(stamp.tzinfo, dtval.tzinfo)
def test_index_convert_to_datetime_array(self):
_skip_if_no_pytz()
def _check_rng(rng):
converted = rng.to_pydatetime()
tm.assert_isinstance(converted, np.ndarray)
for x, stamp in zip(converted, rng):
tm.assert_isinstance(x, datetime)
self.assertEqual(x, stamp.to_pydatetime())
self.assertEqual(x.tzinfo, stamp.tzinfo)
rng = date_range('20090415', '20090519')
rng_eastern = date_range('20090415', '20090519', tz='US/Eastern')
rng_utc = date_range('20090415', '20090519', tz='utc')
_check_rng(rng)
_check_rng(rng_eastern)
_check_rng(rng_utc)
def test_ctor_str_intraday(self):
rng = DatetimeIndex(['1-1-2000 00:00:01'])
self.assertEqual(rng[0].second, 1)
def test_series_ctor_plus_datetimeindex(self):
rng = date_range('20090415', '20090519', freq='B')
data = dict((k, 1) for k in rng)
result = Series(data, index=rng)
self.assertIs(result.index, rng)
def test_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index)
result = result.fillna(method='bfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index, method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index, method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_setitem_timestamp(self):
# 2155
columns = DatetimeIndex(start='1/1/2012', end='2/1/2012',
freq=datetools.bday)
index = lrange(10)
data = DataFrame(columns=columns, index=index)
t = datetime(2012, 11, 1)
ts = Timestamp(t)
data[ts] = np.nan # works
def test_sparse_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
ss = s[:2].reindex(index).to_sparse()
result = ss.fillna(method='pad', limit=5)
expected = ss.fillna(method='pad', limit=5)
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
ss = s[-2:].reindex(index).to_sparse()
result = ss.fillna(method='backfill', limit=5)
expected = ss.fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
s = s.to_sparse()
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index, method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index, method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_sparse_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_pad_require_monotonicity(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
rng2 = rng[::2][::-1]
self.assertRaises(ValueError, rng2.get_indexer, rng,
method='pad')
def test_frame_ctor_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
df = DataFrame({'A': np.random.randn(len(rng)), 'B': dates})
self.assertTrue(np.issubdtype(df['B'].dtype, np.dtype('M8[ns]')))
def test_frame_add_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
df = DataFrame(index=np.arange(len(rng)))
df['A'] = rng
self.assertTrue(np.issubdtype(df['A'].dtype, np.dtype('M8[ns]')))
def test_frame_datetime64_pre1900_repr(self):
df = DataFrame({'year': date_range('1/1/1700', periods=50,
freq='A-DEC')})
# it works!
repr(df)
def test_frame_add_datetime64_col_other_units(self):
n = 100
units = ['h', 'm', 's', 'ms', 'D', 'M', 'Y']
ns_dtype = np.dtype('M8[ns]')
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df[unit] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertEqual(df[unit].dtype, ns_dtype)
self.assertTrue((df[unit].values == ex_vals).all())
# Test insertion into existing datetime64 column
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df['dates'] = np.arange(n, dtype=np.int64).view(ns_dtype)
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
tmp = df.copy()
tmp['dates'] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertTrue((tmp['dates'].values == ex_vals).all())
def test_to_datetime_unit(self):
epoch = 1370745748
s = Series([ epoch + t for t in range(20) ])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = concat([Series([ epoch + t for t in range(20) ]).astype(float),Series([np.nan])],ignore_index=True)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
def test_series_ctor_datetime64(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
series = Series(dates)
self.assertTrue(np.issubdtype(series.dtype, np.dtype('M8[ns]')))
def test_index_cast_datetime64_other_units(self):
arr = np.arange(0, 100, 10, dtype=np.int64).view('M8[D]')
idx = Index(arr)
self.assertTrue((idx.values == tslib.cast_to_nanoseconds(arr)).all())
def test_index_astype_datetime64(self):
idx = Index([datetime(2012, 1, 1)], dtype=object)
if not _np_version_under1p7:
raise nose.SkipTest("test only valid in numpy < 1.7")
casted = idx.astype(np.dtype('M8[D]'))
expected = DatetimeIndex(idx.values)
tm.assert_isinstance(casted, DatetimeIndex)
self.assertTrue(casted.equals(expected))
def test_reindex_series_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
series = Series(rng)
result = series.reindex(lrange(15))
self.assertTrue(np.issubdtype(result.dtype, np.dtype('M8[ns]')))
mask = result.isnull()
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_reindex_frame_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
df = DataFrame({'A': np.random.randn(len(rng)), 'B': rng})
result = df.reindex(lrange(15))
self.assertTrue(np.issubdtype(result['B'].dtype, np.dtype('M8[ns]')))
mask = com.isnull(result)['B']
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_series_repr_nat(self):
series = Series([0, 1000, 2000, iNaT], dtype='M8[ns]')
result = repr(series)
expected = ('0 1970-01-01 00:00:00\n'
'1 1970-01-01 00:00:00.000001\n'
'2 1970-01-01 00:00:00.000002\n'
'3 NaT\n'
'dtype: datetime64[ns]')
self.assertEqual(result, expected)
def test_fillna_nat(self):
series = Series([0, 1, 2, iNaT], dtype='M8[ns]')
filled = series.fillna(method='pad')
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='pad')
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
series = Series([iNaT, 0, 1, 2], dtype='M8[ns]')
filled = series.fillna(method='bfill')
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='bfill')
filled2 = df.fillna(value=series[1])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
def test_string_na_nat_conversion(self):
# GH #999, #858
from pandas.compat import parse_date
strings = np.array(['1/1/2000', '1/2/2000', np.nan,
'1/4/2000, 12:34:56'], dtype=object)
expected = np.empty(4, dtype='M8[ns]')
for i, val in enumerate(strings):
if com.isnull(val):
expected[i] = iNaT
else:
expected[i] = parse_date(val)
result = tslib.array_to_datetime(strings)
assert_almost_equal(result, expected)
result2 = to_datetime(strings)
tm.assert_isinstance(result2, DatetimeIndex)
assert_almost_equal(result, result2)
malformed = np.array(['1/100/2000', np.nan], dtype=object)
result = to_datetime(malformed)
assert_almost_equal(result, malformed)
self.assertRaises(ValueError, to_datetime, malformed,
errors='raise')
idx = ['a', 'b', 'c', 'd', 'e']
series = Series(['1/1/2000', np.nan, '1/3/2000', np.nan,
'1/5/2000'], index=idx, name='foo')
dseries = Series([to_datetime('1/1/2000'), np.nan,
to_datetime('1/3/2000'), np.nan,
to_datetime('1/5/2000')], index=idx, name='foo')
result = to_datetime(series)
dresult = to_datetime(dseries)
expected = Series(np.empty(5, dtype='M8[ns]'), index=idx)
for i in range(5):
x = series[i]
if isnull(x):
expected[i] = iNaT
else:
expected[i] = to_datetime(x)
assert_series_equal(result, expected)
self.assertEqual(result.name, 'foo')
assert_series_equal(dresult, expected)
self.assertEqual(dresult.name, 'foo')
def test_to_datetime_iso8601(self):
result = to_datetime(["2012-01-01 00:00:00"])
exp = Timestamp("2012-01-01 00:00:00")
self.assertEqual(result[0], exp)
result = to_datetime(['20121001']) # bad iso 8601
exp = Timestamp('2012-10-01')
self.assertEqual(result[0], exp)
def test_to_datetime_default(self):
rs = to_datetime('2001')
xp = datetime(2001, 1, 1)
        self.assertEqual(rs, xp)
#### dayfirst is essentially broken
#### to_datetime('01-13-2012', dayfirst=True)
#### self.assertRaises(ValueError, to_datetime('01-13-2012', dayfirst=True))
def test_to_datetime_on_datetime64_series(self):
# #2699
s = Series(date_range('1/1/2000', periods=10))
result = to_datetime(s)
self.assertEqual(result[0], s[0])
def test_to_datetime_with_apply(self):
# this is only locale tested with US/None locales
_skip_if_has_locale()
# GH 5195
# with a format and coerce a single item to_datetime fails
td = Series(['May 04', 'Jun 02', 'Dec 11'], index=[1,2,3])
expected = pd.to_datetime(td, format='%b %y')
result = td.apply(pd.to_datetime, format='%b %y')
assert_series_equal(result, expected)
td = pd.Series(['May 04', 'Jun 02', ''], index=[1,2,3])
self.assertRaises(ValueError, lambda : pd.to_datetime(td,format='%b %y'))
self.assertRaises(ValueError, lambda : td.apply(pd.to_datetime, format='%b %y'))
expected = pd.to_datetime(td, format='%b %y', coerce=True)
result = td.apply(lambda x: pd.to_datetime(x, format='%b %y', coerce=True))
assert_series_equal(result, expected)
def test_nat_vector_field_access(self):
idx = DatetimeIndex(['1/1/2000', None, None, '1/4/2000'])
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(idx, field)
expected = [getattr(x, field) if x is not NaT else -1
for x in idx]
self.assert_numpy_array_equal(result, expected)
def test_nat_scalar_field_access(self):
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(NaT, field)
self.assertEqual(result, -1)
self.assertEqual(NaT.weekday(), -1)
def test_to_datetime_types(self):
# empty string
result = to_datetime('')
self.assertIs(result, NaT)
result = to_datetime(['', ''])
self.assertTrue(isnull(result).all())
# ints
result = Timestamp(0)
expected = to_datetime(0)
self.assertEqual(result, expected)
# GH 3888 (strings)
expected = to_datetime(['2012'])[0]
result = to_datetime('2012')
self.assertEqual(result, expected)
### array = ['2012','20120101','20120101 12:01:01']
array = ['20120101','20120101 12:01:01']
expected = list(to_datetime(array))
result = lmap(Timestamp,array)
tm.assert_almost_equal(result,expected)
### currently fails ###
### result = Timestamp('2012')
### expected = to_datetime('2012')
### self.assertEqual(result, expected)
def test_to_datetime_unprocessable_input(self):
# GH 4928
self.assert_numpy_array_equal(
to_datetime([1, '1']),
np.array([1, '1'], dtype='O')
)
self.assertRaises(TypeError, to_datetime, [1, '1'], errors='raise')
def test_to_datetime_other_datetime64_units(self):
# 5/25/2012
scalar = np.int64(1337904000000000).view('M8[us]')
as_obj = scalar.astype('O')
index = DatetimeIndex([scalar])
self.assertEqual(index[0], scalar.astype('O'))
value = Timestamp(scalar)
self.assertEqual(value, as_obj)
def test_to_datetime_list_of_integers(self):
rng = date_range('1/1/2000', periods=20)
rng = DatetimeIndex(rng.values)
ints = list(rng.asi8)
result = DatetimeIndex(ints)
self.assertTrue(rng.equals(result))
def test_to_datetime_dt64s(self):
in_bound_dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
for dt in in_bound_dts:
self.assertEqual(
pd.to_datetime(dt),
Timestamp(dt)
)
oob_dts = [
np.datetime64('1000-01-01'),
np.datetime64('5000-01-02'),
]
for dt in oob_dts:
self.assertRaises(ValueError, pd.to_datetime, dt, errors='raise')
self.assertRaises(ValueError, tslib.Timestamp, dt)
self.assertIs(pd.to_datetime(dt, coerce=True), NaT)
def test_to_datetime_array_of_dt64s(self):
dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
# Assuming all datetimes are in bounds, to_datetime() returns
# an array that is equal to Timestamp() parsing
self.assert_numpy_array_equal(
pd.to_datetime(dts, box=False),
np.array([Timestamp(x).asm8 for x in dts])
)
# A list of datetimes where the last one is out of bounds
dts_with_oob = dts + [np.datetime64('9999-01-01')]
self.assertRaises(
ValueError,
pd.to_datetime,
dts_with_oob,
coerce=False,
errors='raise'
)
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=True),
np.array(
[
Timestamp(dts_with_oob[0]).asm8,
Timestamp(dts_with_oob[1]).asm8,
iNaT,
],
dtype='M8'
)
)
# With coerce=False and errors='ignore', out of bounds datetime64s
# are converted to their .item(), which depending on the version of
# numpy is either a python datetime.datetime or datetime.date
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=False),
np.array(
[dt.item() for dt in dts_with_oob],
dtype='O'
)
)
def test_index_to_datetime(self):
idx = Index(['1/1/2000', '1/2/2000', '1/3/2000'])
result = idx.to_datetime()
expected = DatetimeIndex(datetools.to_datetime(idx.values))
self.assertTrue(result.equals(expected))
today = datetime.today()
idx = Index([today], dtype=object)
result = idx.to_datetime()
expected = DatetimeIndex([today])
self.assertTrue(result.equals(expected))
def test_to_datetime_freq(self):
xp = bdate_range('2000-1-1', periods=10, tz='UTC')
rs = xp.to_datetime()
self.assertEqual(xp.freq, rs.freq)
self.assertEqual(xp.tzinfo, rs.tzinfo)
def test_range_misspecified(self):
# GH #1095
self.assertRaises(ValueError, date_range, '1/1/2000')
self.assertRaises(ValueError, date_range, end='1/1/2000')
self.assertRaises(ValueError, date_range, periods=10)
self.assertRaises(ValueError, date_range, '1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, end='1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, periods=10, freq='H')
def test_reasonable_keyerror(self):
# GH #1062
index = DatetimeIndex(['1/3/2000'])
try:
index.get_loc('1/1/2000')
except KeyError as e:
self.assertIn('2000', str(e))
def test_reindex_with_datetimes(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
result = ts.reindex(list(ts.index[5:10]))
expected = ts[5:10]
tm.assert_series_equal(result, expected)
result = ts[list(ts.index[5:10])]
tm.assert_series_equal(result, expected)
def test_promote_datetime_date(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
ts_slice = ts[5:]
ts2 = ts_slice.copy()
ts2.index = [x.date() for x in ts2.index]
result = ts + ts2
result2 = ts2 + ts
expected = ts + ts[5:]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# test asfreq
result = ts2.asfreq('4H', method='ffill')
expected = ts[5:].asfreq('4H', method='ffill')
assert_series_equal(result, expected)
result = rng.get_indexer(ts2.index)
expected = rng.get_indexer(ts_slice.index)
self.assert_numpy_array_equal(result, expected)
def test_asfreq_normalize(self):
rng = date_range('1/1/2000 09:30', periods=20)
norm = date_range('1/1/2000', periods=20)
vals = np.random.randn(20)
ts = Series(vals, index=rng)
result = ts.asfreq('D', normalize=True)
norm = date_range('1/1/2000', periods=20)
expected = Series(vals, index=norm)
assert_series_equal(result, expected)
vals = np.random.randn(20, 3)
ts = DataFrame(vals, index=rng)
result = ts.asfreq('D', normalize=True)
expected = DataFrame(vals, index=norm)
assert_frame_equal(result, expected)
def test_date_range_gen_error(self):
rng = date_range('1/1/2000 00:00', '1/1/2000 00:18', freq='5min')
self.assertEqual(len(rng), 4)
def test_first_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.first('10d')
self.assertEqual(len(result), 20)
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.first('10d')
self.assertEqual(len(result), 10)
result = ts.first('3M')
expected = ts[:'3/31/2000']
assert_series_equal(result, expected)
result = ts.first('21D')
expected = ts[:21]
assert_series_equal(result, expected)
result = ts[:0].first('3M')
assert_series_equal(result, ts[:0])
def test_last_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.last('10d')
self.assertEqual(len(result), 20)
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.last('10d')
self.assertEqual(len(result), 10)
result = ts.last('21D')
expected = ts['12/12/2009':]
assert_series_equal(result, expected)
result = ts.last('21D')
expected = ts[-21:]
assert_series_equal(result, expected)
result = ts[:0].last('3M')
assert_series_equal(result, ts[:0])
def test_add_offset(self):
rng = date_range('1/1/2000', '2/1/2000')
result = rng + offsets.Hour(2)
expected = date_range('1/1/2000 02:00', '2/1/2000 02:00')
self.assertTrue(result.equals(expected))
def test_format_pre_1900_dates(self):
rng = date_range('1/1/1850', '1/1/1950', freq='A-DEC')
rng.format()
ts = Series(1, index=rng)
repr(ts)
def test_repeat(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
def test_at_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
rs = ts.at_time(rng[1])
self.assertTrue((rs.index.hour == rng[1].hour).all())
self.assertTrue((rs.index.minute == rng[1].minute).all())
self.assertTrue((rs.index.second == rng[1].second).all())
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts[time(9, 30)]
result_df = df.ix[time(9, 30)]
expected = ts[(rng.hour == 9) & (rng.minute == 30)]
exp_df = df[(rng.hour == 9) & (rng.minute == 30)]
# expected.index = date_range('1/1/2000', '1/4/2000')
assert_series_equal(result, expected)
tm.assert_frame_equal(result_df, exp_df)
chunk = df.ix['1/4/2000':]
result = chunk.ix[time(9, 30)]
expected = result_df[-1:]
tm.assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.at_time(time(0, 0))
assert_series_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = Series(np.random.randn(len(rng)), rng)
rs = ts.at_time('16:00')
self.assertEqual(len(rs), 0)
def test_at_time_frame(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
rs = ts.at_time(rng[1])
self.assertTrue((rs.index.hour == rng[1].hour).all())
self.assertTrue((rs.index.minute == rng[1].minute).all())
self.assertTrue((rs.index.second == rng[1].second).all())
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_frame_equal(result, expected)
result = ts.ix[time(9, 30)]
expected = ts.ix[(rng.hour == 9) & (rng.minute == 30)]
assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts.at_time(time(0, 0))
assert_frame_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = DataFrame(np.random.randn(len(rng), 2), rng)
rs = ts.at_time('16:00')
self.assertEqual(len(rs), 0)
def test_between_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
stime = time(0, 0)
etime = time(1, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue(t >= stime)
else:
self.assertTrue(t > stime)
if inc_end:
self.assertTrue(t <= etime)
else:
self.assertTrue(t < etime)
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
assert_series_equal(result, expected)
# across midnight
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
stime = time(22, 0)
etime = time(9, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue((t >= stime) or (t <= etime))
else:
self.assertTrue((t > stime) or (t <= etime))
if inc_end:
self.assertTrue((t <= etime) or (t >= stime))
else:
self.assertTrue((t < etime) or (t >= stime))
def test_between_time_frame(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(0, 0)
etime = time(1, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue(t >= stime)
else:
self.assertTrue(t > stime)
if inc_end:
self.assertTrue(t <= etime)
else:
self.assertTrue(t < etime)
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
assert_frame_equal(result, expected)
# across midnight
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(22, 0)
etime = time(9, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue((t >= stime) or (t <= etime))
else:
self.assertTrue((t > stime) or (t <= etime))
if inc_end:
self.assertTrue((t <= etime) or (t >= stime))
else:
self.assertTrue((t < etime) or (t >= stime))
def test_dti_constructor_preserve_dti_freq(self):
rng = date_range('1/1/2000', '1/2/2000', freq='5min')
rng2 = DatetimeIndex(rng)
self.assertEqual(rng.freq, rng2.freq)
def test_normalize(self):
rng = date_range('1/1/2000 9:30', periods=10, freq='D')
result = rng.normalize()
expected = date_range('1/1/2000', periods=10, freq='D')
self.assertTrue(result.equals(expected))
rng_ns = pd.DatetimeIndex(np.array([1380585623454345752, 1380585612343234312]).astype("datetime64[ns]"))
rng_ns_normalized = rng_ns.normalize()
expected = pd.DatetimeIndex(np.array([1380585600000000000, 1380585600000000000]).astype("datetime64[ns]"))
self.assertTrue(rng_ns_normalized.equals(expected))
self.assertTrue(result.is_normalized)
self.assertFalse(rng.is_normalized)
def test_to_period(self):
from pandas.tseries.period import period_range
ts = _simple_ts('1/1/2000', '1/1/2001')
pts = ts.to_period()
exp = ts.copy()
exp.index = period_range('1/1/2000', '1/1/2001')
assert_series_equal(pts, exp)
pts = ts.to_period('M')
self.assertTrue(pts.index.equals(exp.index.asfreq('M')))
def create_dt64_based_index(self):
data = [Timestamp('2007-01-01 10:11:12.123456Z'),
Timestamp('2007-01-01 10:11:13.789123Z')]
index = DatetimeIndex(data)
return index
def test_to_period_millisecond(self):
index = self.create_dt64_based_index()
period = index.to_period(freq='L')
self.assertEqual(2, len(period))
self.assertEqual(period[0], Period('2007-01-01 10:11:12.123Z', 'L'))
self.assertEqual(period[1], Period('2007-01-01 10:11:13.789Z', 'L'))
def test_to_period_microsecond(self):
index = self.create_dt64_based_index()
period = index.to_period(freq='U')
self.assertEqual(2, len(period))
self.assertEqual(period[0], Period('2007-01-01 10:11:12.123456Z', 'U'))
self.assertEqual(period[1], Period('2007-01-01 10:11:13.789123Z', 'U'))
def test_to_period_tz(self):
_skip_if_no_pytz()
from dateutil.tz import tzlocal
from pytz import utc as UTC
xp = date_range('1/1/2000', '4/1/2000').to_period()
ts = date_range('1/1/2000', '4/1/2000', tz='US/Eastern')
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
ts = date_range('1/1/2000', '4/1/2000', tz=UTC)
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
ts = date_range('1/1/2000', '4/1/2000', tz=tzlocal())
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
def test_frame_to_period(self):
K = 5
from pandas.tseries.period import period_range
dr = date_range('1/1/2000', '1/1/2001')
pr = period_range('1/1/2000', '1/1/2001')
df = DataFrame(randn(len(dr), K), index=dr)
df['mix'] = 'a'
pts = df.to_period()
exp = df.copy()
exp.index = pr
assert_frame_equal(pts, exp)
pts = df.to_period('M')
self.assertTrue(pts.index.equals(exp.index.asfreq('M')))
df = df.T
pts = df.to_period(axis=1)
exp = df.copy()
exp.columns = pr
assert_frame_equal(pts, exp)
pts = df.to_period('M', axis=1)
self.assertTrue(pts.columns.equals(exp.columns.asfreq('M')))
self.assertRaises(ValueError, df.to_period, axis=2)
def test_timestamp_fields(self):
# extra fields from DatetimeIndex like quarter and week
idx = tm.makeDateIndex(100)
fields = ['dayofweek', 'dayofyear', 'week', 'weekofyear', 'quarter',
'is_month_start', 'is_month_end', 'is_quarter_start',
'is_quarter_end', 'is_year_start', 'is_year_end']
for f in fields:
expected = getattr(idx, f)[-1]
result = getattr(Timestamp(idx[-1]), f)
self.assertEqual(result, expected)
self.assertEqual(idx.freq, Timestamp(idx[-1], idx.freq).freq)
self.assertEqual(idx.freqstr, Timestamp(idx[-1], idx.freq).freqstr)
def test_woy_boundary(self):
# make sure weeks at year boundaries are correct
d = datetime(2013,12,31)
result = Timestamp(d).week
expected = 1 # ISO standard
self.assertEqual(result, expected)
d = datetime(2008,12,28)
result = Timestamp(d).week
expected = 52 # ISO standard
self.assertEqual(result, expected)
d = datetime(2009,12,31)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
d = datetime(2010,1,1)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
d = datetime(2010,1,3)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
result = np.array([Timestamp(datetime(*args)).week for args in
[(2000,1,1),(2000,1,2),(2005,1,1),(2005,1,2)]])
self.assertTrue((result == [52, 52, 53, 53]).all())
def test_timestamp_date_out_of_range(self):
self.assertRaises(ValueError, Timestamp, '1676-01-01')
self.assertRaises(ValueError, Timestamp, '2263-01-01')
# 1475
self.assertRaises(ValueError, DatetimeIndex, ['1400-01-01'])
self.assertRaises(ValueError, DatetimeIndex, [datetime(1400, 1, 1)])
def test_timestamp_repr(self):
# pre-1900
stamp = Timestamp('1850-01-01', tz='US/Eastern')
repr(stamp)
iso8601 = '1850-01-01 01:23:45.012345'
stamp = Timestamp(iso8601, tz='US/Eastern')
result = repr(stamp)
self.assertIn(iso8601, result)
def test_timestamp_from_ordinal(self):
# GH 3042
dt = datetime(2011, 4, 16, 0, 0)
ts = Timestamp.fromordinal(dt.toordinal())
self.assertEqual(ts.to_pydatetime(), dt)
# with a tzinfo
stamp = Timestamp('2011-4-16', tz='US/Eastern')
dt_tz = stamp.to_pydatetime()
ts = Timestamp.fromordinal(dt_tz.toordinal(),tz='US/Eastern')
self.assertEqual(ts.to_pydatetime(), dt_tz)
def test_datetimeindex_integers_shift(self):
rng = date_range('1/1/2000', periods=20)
result = rng + 5
expected = rng.shift(5)
self.assertTrue(result.equals(expected))
result = rng - 5
expected = rng.shift(-5)
self.assertTrue(result.equals(expected))
def test_astype_object(self):
# NumPy 1.6.1 weak ns support
rng = date_range('1/1/2000', periods=20)
casted = rng.astype('O')
exp_values = list(rng)
self.assert_numpy_array_equal(casted, exp_values)
def test_catch_infinite_loop(self):
offset = datetools.DateOffset(minute=5)
# blow up, don't loop forever
self.assertRaises(Exception, date_range, datetime(2011, 11, 11),
datetime(2011, 11, 12), freq=offset)
def test_append_concat(self):
rng = date_range('5/8/2012 1:45', periods=10, freq='5T')
ts = Series(np.random.randn(len(rng)), rng)
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
result = ts.append(ts)
result_df = df.append(df)
ex_index = DatetimeIndex(np.tile(rng.values, 2))
self.assertTrue(result.index.equals(ex_index))
self.assertTrue(result_df.index.equals(ex_index))
appended = rng.append(rng)
self.assertTrue(appended.equals(ex_index))
appended = rng.append([rng, rng])
ex_index = DatetimeIndex(np.tile(rng.values, 3))
self.assertTrue(appended.equals(ex_index))
# different index names
rng1 = rng.copy()
rng2 = rng.copy()
rng1.name = 'foo'
rng2.name = 'bar'
self.assertEqual(rng1.append(rng1).name, 'foo')
self.assertIsNone(rng1.append(rng2).name)
def test_append_concat_tz(self):
#GH 2938
_skip_if_no_pytz()
rng = date_range('5/8/2012 1:45', periods=10, freq='5T',
tz='US/Eastern')
rng2 = date_range('5/8/2012 2:35', periods=10, freq='5T',
tz='US/Eastern')
rng3 = date_range('5/8/2012 1:45', periods=20, freq='5T',
tz='US/Eastern')
ts = Series(np.random.randn(len(rng)), rng)
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
ts2 = Series(np.random.randn(len(rng2)), rng2)
df2 = DataFrame(np.random.randn(len(rng2), 4), index=rng2)
result = ts.append(ts2)
result_df = df.append(df2)
self.assertTrue(result.index.equals(rng3))
self.assertTrue(result_df.index.equals(rng3))
appended = rng.append(rng2)
self.assertTrue(appended.equals(rng3))
def test_set_dataframe_column_ns_dtype(self):
x = DataFrame([datetime.now(), datetime.now()])
self.assertEqual(x[0].dtype, np.dtype('M8[ns]'))
def test_groupby_count_dateparseerror(self):
dr = date_range(start='1/1/2012', freq='5min', periods=10)
# BAD Example, datetimes first
s = Series(np.arange(10), index=[dr, lrange(10)])
grouped = s.groupby(lambda x: x[1] % 2 == 0)
result = grouped.count()
s = Series(np.arange(10), index=[lrange(10), dr])
grouped = s.groupby(lambda x: x[0] % 2 == 0)
expected = grouped.count()
assert_series_equal(result, expected)
def test_datetimeindex_repr_short(self):
dr = date_range(start='1/1/2012', periods=1)
repr(dr)
dr = date_range(start='1/1/2012', periods=2)
repr(dr)
dr = date_range(start='1/1/2012', periods=3)
repr(dr)
def test_constructor_int64_nocopy(self):
# #1624
arr = np.arange(1000, dtype=np.int64)
index = DatetimeIndex(arr)
arr[50:100] = -1
self.assertTrue((index.asi8[50:100] == -1).all())
arr = np.arange(1000, dtype=np.int64)
index = DatetimeIndex(arr, copy=True)
arr[50:100] = -1
self.assertTrue((index.asi8[50:100] != -1).all())
def test_series_interpolate_method_values(self):
# #1646
ts = _simple_ts('1/1/2000', '1/20/2000')
ts[::2] = np.nan
result = ts.interpolate(method='values')
exp = ts.interpolate()
assert_series_equal(result, exp)
def test_frame_datetime64_handling_groupby(self):
# it works!
df = DataFrame([(3, np.datetime64('2012-07-03')),
(3, np.datetime64('2012-07-04'))],
columns=['a', 'date'])
result = df.groupby('a').first()
self.assertEqual(result['date'][3], Timestamp('2012-07-03'))
def test_series_interpolate_intraday(self):
# #1698
index = pd.date_range('1/1/2012', periods=4, freq='12D')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(days=1)).order()
exp = ts.reindex(new_index).interpolate(method='time')
index = pd.date_range('1/1/2012', periods=4, freq='12H')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(hours=1)).order()
result = ts.reindex(new_index).interpolate(method='time')
self.assert_numpy_array_equal(result.values, exp.values)
def test_frame_dict_constructor_datetime64_1680(self):
dr = date_range('1/1/2012', periods=10)
s = Series(dr, index=dr)
# it works!
DataFrame({'a': 'foo', 'b': s}, index=dr)
DataFrame({'a': 'foo', 'b': s.values}, index=dr)
def test_frame_datetime64_mixed_index_ctor_1681(self):
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
ts = Series(dr)
# it works!
d = DataFrame({'A': 'foo', 'B': ts}, index=dr)
self.assertTrue(d['B'].isnull().all())
def test_frame_timeseries_to_records(self):
index = date_range('1/1/2000', periods=10)
df = DataFrame(np.random.randn(10, 3), index=index,
columns=['a', 'b', 'c'])
result = df.to_records()
self.assertEqual(result['index'].dtype, np.dtype('M8[ns]'))
result = df.to_records(index=False)
def test_frame_datetime64_duplicated(self):
dates = date_range('2010-07-01', end='2010-08-05')
tst = DataFrame({'symbol': 'AAA', 'date': dates})
result = tst.duplicated(['date', 'symbol'])
self.assertTrue((-result).all())
tst = DataFrame({'date': dates})
result = tst.duplicated()
self.assertTrue((-result).all())
def test_timestamp_compare_with_early_datetime(self):
# e.g. datetime.min
stamp = Timestamp('2012-01-01')
self.assertFalse(stamp == datetime.min)
self.assertFalse(stamp == datetime(1600, 1, 1))
self.assertFalse(stamp == datetime(2700, 1, 1))
self.assertNotEqual(stamp, datetime.min)
self.assertNotEqual(stamp, datetime(1600, 1, 1))
self.assertNotEqual(stamp, datetime(2700, 1, 1))
self.assertTrue(stamp > datetime(1600, 1, 1))
self.assertTrue(stamp >= datetime(1600, 1, 1))
self.assertTrue(stamp < datetime(2700, 1, 1))
self.assertTrue(stamp <= datetime(2700, 1, 1))
def test_to_html_timestamp(self):
rng = date_range('2000-01-01', periods=10)
df = DataFrame(np.random.randn(10, 4), index=rng)
result = df.to_html()
self.assertIn('2000-01-01', result)
def test_to_csv_numpy_16_bug(self):
frame = DataFrame({'a': date_range('1/1/2000', periods=10)})
buf = StringIO()
frame.to_csv(buf)
result = buf.getvalue()
self.assertIn('2000-01-01', result)
def test_series_map_box_timestamps(self):
# #2689, #2627
s = Series(date_range('1/1/2000', periods=10))
def f(x):
return (x.hour, x.day, x.month)
# it works!
s.map(f)
s.apply(f)
DataFrame(s).applymap(f)
def test_concat_datetime_datetime64_frame(self):
# #2624
rows = []
rows.append([datetime(2010, 1, 1), 1])
rows.append([datetime(2010, 1, 2), 'hi'])
df2_obj = DataFrame.from_records(rows, columns=['date', 'test'])
ind = date_range(start="2000/1/1", freq="D", periods=10)
df1 = DataFrame({'date': ind, 'test':lrange(10)})
# it works!
pd.concat([df1, df2_obj])
def test_period_resample(self):
# GH3609
s = Series(range(100),index=date_range('20130101', freq='s', periods=100), dtype='float')
s[10:30] = np.nan
expected = Series([34.5, 79.5], index=[Period('2013-01-01 00:00', 'T'), Period('2013-01-01 00:01', 'T')])
result = s.to_period().resample('T', kind='period')
assert_series_equal(result, expected)
result2 = s.resample('T', kind='period')
assert_series_equal(result2, expected)
def test_period_resample_with_local_timezone(self):
# GH5430
_skip_if_no_pytz()
import pytz
local_timezone = pytz.timezone('America/Los_Angeles')
start = datetime(year=2013, month=11, day=1, hour=0, minute=0, tzinfo=pytz.utc)
# 1 day later
end = datetime(year=2013, month=11, day=2, hour=0, minute=0, tzinfo=pytz.utc)
index = pd.date_range(start, end, freq='H')
series = pd.Series(1, index=index)
series = series.tz_convert(local_timezone)
result = series.resample('D', kind='period')
# Create the expected series
expected_index = (pd.period_range(start=start, end=end, freq='D') - 1) # Index is moved back a day with the timezone conversion from UTC to Pacific
expected = pd.Series(1, index=expected_index)
assert_series_equal(result, expected)
def test_pickle(self):
#GH4606
from pandas.compat import cPickle
import pickle
for pick in [pickle, cPickle]:
p = pick.loads(pick.dumps(NaT))
self.assertTrue(p is NaT)
idx = pd.to_datetime(['2013-01-01', NaT, '2014-01-06'])
idx_p = pick.loads(pick.dumps(idx))
self.assertTrue(idx_p[0] == idx[0])
self.assertTrue(idx_p[1] is NaT)
self.assertTrue(idx_p[2] == idx[2])
def _simple_ts(start, end, freq='D'):
rng = date_range(start, end, freq=freq)
return Series(np.random.randn(len(rng)), index=rng)
class TestDatetimeIndex(tm.TestCase):
_multiprocess_can_split_ = True
def test_hash_error(self):
index = date_range('20010101', periods=10)
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(index).__name__):
hash(index)
def test_stringified_slice_with_tz(self):
#GH2658
import datetime
start=datetime.datetime.now()
idx=DatetimeIndex(start=start,freq="1d",periods=10)
df=DataFrame(lrange(10),index=idx)
df["2013-01-14 23:44:34.437768-05:00":] # no exception here
def test_append_join_nondatetimeindex(self):
rng = date_range('1/1/2000', periods=10)
idx = Index(['a', 'b', 'c', 'd'])
result = rng.append(idx)
tm.assert_isinstance(result[0], Timestamp)
# it works
rng.join(idx, how='outer')
def test_astype(self):
rng = date_range('1/1/2000', periods=10)
result = rng.astype('i8')
self.assert_numpy_array_equal(result, rng.asi8)
def test_to_period_nofreq(self):
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-04'])
self.assertRaises(ValueError, idx.to_period)
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'],
freq='infer')
idx.to_period()
def test_000constructor_resolution(self):
# 2252
t1 = Timestamp((1352934390 * 1000000000) + 1000000 + 1000 + 1)
idx = DatetimeIndex([t1])
self.assertEqual(idx.nanosecond[0], t1.nanosecond)
def test_constructor_coverage(self):
rng = date_range('1/1/2000', periods=10.5)
exp = date_range('1/1/2000', periods=10)
self.assertTrue(rng.equals(exp))
self.assertRaises(ValueError, DatetimeIndex, start='1/1/2000',
periods='foo', freq='D')
self.assertRaises(ValueError, DatetimeIndex, start='1/1/2000',
end='1/10/2000')
self.assertRaises(ValueError, DatetimeIndex, '1/1/2000')
# generator expression
gen = (datetime(2000, 1, 1) + timedelta(i) for i in range(10))
result = DatetimeIndex(gen)
expected = DatetimeIndex([datetime(2000, 1, 1) + timedelta(i)
for i in range(10)])
self.assertTrue(result.equals(expected))
# NumPy string array
strings = np.array(['2000-01-01', '2000-01-02', '2000-01-03'])
result = DatetimeIndex(strings)
expected = DatetimeIndex(strings.astype('O'))
self.assertTrue(result.equals(expected))
from_ints = DatetimeIndex(expected.asi8)
self.assertTrue(from_ints.equals(expected))
# non-conforming
self.assertRaises(ValueError, DatetimeIndex,
['2000-01-01', '2000-01-02', '2000-01-04'],
freq='D')
self.assertRaises(ValueError, DatetimeIndex,
start='2011-01-01', freq='b')
self.assertRaises(ValueError, DatetimeIndex,
end='2011-01-01', freq='B')
self.assertRaises(ValueError, DatetimeIndex, periods=10, freq='D')
def test_constructor_name(self):
idx = DatetimeIndex(start='2000-01-01', periods=1, freq='A',
name='TEST')
self.assertEqual(idx.name, 'TEST')
def test_comparisons_coverage(self):
rng = date_range('1/1/2000', periods=10)
# raise TypeError for now
self.assertRaises(TypeError, rng.__lt__, rng[3].value)
result = rng == list(rng)
exp = rng == rng
self.assert_numpy_array_equal(result, exp)
def test_map(self):
rng = date_range('1/1/2000', periods=10)
f = lambda x: x.strftime('%Y%m%d')
result = rng.map(f)
exp = [f(x) for x in rng]
self.assert_numpy_array_equal(result, exp)
def test_add_union(self):
rng = date_range('1/1/2000', periods=5)
rng2 = date_range('1/6/2000', periods=5)
result = rng + rng2
expected = rng.union(rng2)
self.assertTrue(result.equals(expected))
def test_misc_coverage(self):
rng = date_range('1/1/2000', periods=5)
result = rng.groupby(rng.day)
tm.assert_isinstance(list(result.values())[0][0], Timestamp)
idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02'])
self.assertTrue(idx.equals(list(idx)))
non_datetime = Index(list('abc'))
self.assertFalse(idx.equals(list(non_datetime)))
def test_union_coverage(self):
idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02'])
ordered = DatetimeIndex(idx.order(), freq='infer')
result = ordered.union(idx)
self.assertTrue(result.equals(ordered))
result = ordered[:0].union(ordered)
self.assertTrue(result.equals(ordered))
self.assertEqual(result.freq, ordered.freq)
def test_union_bug_1730(self):
rng_a = date_range('1/1/2012', periods=4, freq='3H')
rng_b = date_range('1/1/2012', periods=4, freq='4H')
result = rng_a.union(rng_b)
exp = DatetimeIndex(sorted(set(list(rng_a)) | set(list(rng_b))))
self.assertTrue(result.equals(exp))
def test_union_bug_1745(self):
left = DatetimeIndex(['2012-05-11 15:19:49.695000'])
right = DatetimeIndex(['2012-05-29 13:04:21.322000',
'2012-05-11 15:27:24.873000',
'2012-05-11 15:31:05.350000'])
result = left.union(right)
exp = DatetimeIndex(sorted(set(list(left)) | set(list(right))))
self.assertTrue(result.equals(exp))
def test_union_bug_4564(self):
from pandas import DateOffset
left = date_range("2013-01-01", "2013-02-01")
right = left + DateOffset(minutes=15)
result = left.union(right)
exp = DatetimeIndex(sorted(set(list(left)) | set(list(right))))
self.assertTrue(result.equals(exp))
def test_intersection_bug_1708(self):
from pandas import DateOffset
index_1 = date_range('1/1/2012', periods=4, freq='12H')
index_2 = index_1 + DateOffset(hours=1)
result = index_1 & index_2
self.assertEqual(len(result), 0)
# def test_add_timedelta64(self):
# rng = date_range('1/1/2000', periods=5)
# delta = rng.values[3] - rng.values[1]
# result = rng + delta
# expected = rng + timedelta(2)
# self.assertTrue(result.equals(expected))
def test_get_duplicates(self):
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-02',
'2000-01-03', '2000-01-03', '2000-01-04'])
result = idx.get_duplicates()
ex = DatetimeIndex(['2000-01-02', '2000-01-03'])
self.assertTrue(result.equals(ex))
def test_argmin_argmax(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
self.assertEqual(idx.argmin(), 1)
self.assertEqual(idx.argmax(), 0)
def test_order(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
ordered = idx.order()
self.assertTrue(ordered.is_monotonic)
ordered = idx.order(ascending=False)
self.assertTrue(ordered[::-1].is_monotonic)
ordered, dexer = idx.order(return_indexer=True)
self.assertTrue(ordered.is_monotonic)
self.assert_numpy_array_equal(dexer, [1, 2, 0])
ordered, dexer = idx.order(return_indexer=True, ascending=False)
self.assertTrue(ordered[::-1].is_monotonic)
self.assert_numpy_array_equal(dexer, [0, 2, 1])
def test_insert(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
result = idx.insert(2, datetime(2000, 1, 5))
exp = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-05',
'2000-01-02'])
self.assertTrue(result.equals(exp))
# insertion of non-datetime should coerce to object index
result = idx.insert(1, 'inserted')
expected = Index([datetime(2000, 1, 4), 'inserted', datetime(2000, 1, 1),
datetime(2000, 1, 2)])
self.assertNotIsInstance(result, DatetimeIndex)
tm.assert_index_equal(result, expected)
idx = date_range('1/1/2000', periods=3, freq='M')
result = idx.insert(3, datetime(2000, 4, 30))
self.assertEqual(result.freqstr, 'M')
def test_map_bug_1677(self):
index = DatetimeIndex(['2012-04-25 09:30:00.393000'])
f = index.asof
result = index.map(f)
expected = np.array([f(index[0])])
self.assert_numpy_array_equal(result, expected)
def test_groupby_function_tuple_1677(self):
df = DataFrame(np.random.rand(100),
index=date_range("1/1/2000", periods=100))
monthly_group = df.groupby(lambda x: (x.year, x.month))
result = monthly_group.mean()
tm.assert_isinstance(result.index[0], tuple)
def test_append_numpy_bug_1681(self):
# another datetime64 bug
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
a = DataFrame()
c = DataFrame({'A': 'foo', 'B': dr}, index=dr)
result = a.append(c)
self.assertTrue((result['B'] == dr).all())
def test_isin(self):
index = tm.makeDateIndex(4)
result = index.isin(index)
self.assertTrue(result.all())
result = index.isin(list(index))
self.assertTrue(result.all())
assert_almost_equal(index.isin([index[2], 5]),
[False, False, True, False])
def test_union(self):
i1 = Int64Index(np.arange(0, 20, 2))
i2 = Int64Index(np.arange(10, 30, 2))
result = i1.union(i2)
expected = Int64Index(np.arange(0, 30, 2))
self.assert_numpy_array_equal(result, expected)
def test_union_with_DatetimeIndex(self):
i1 = Int64Index(np.arange(0, 20, 2))
i2 = DatetimeIndex(start='2012-01-03 00:00:00', periods=10, freq='D')
i1.union(i2) # Works
i2.union(i1) # Fails with "AttributeError: can't set attribute"
def test_time(self):
rng = pd.date_range('1/1/2000', freq='12min', periods=10)
result = pd.Index(rng).time
expected = [t.time() for t in rng]
self.assertTrue((result == expected).all())
def test_date(self):
rng = pd.date_range('1/1/2000', freq='12H', periods=10)
result = pd.Index(rng).date
expected = [t.date() for t in rng]
self.assertTrue((result == expected).all())
def test_does_not_convert_mixed_integer(self):
df = tm.makeCustomDataframe(10, 10, data_gen_f=lambda *args, **kwargs:
randn(), r_idx_type='i', c_idx_type='dt')
cols = df.columns.join(df.index, how='outer')
joined = cols.join(df.columns)
self.assertEqual(cols.dtype, np.dtype('O'))
self.assertEqual(cols.dtype, joined.dtype)
assert_array_equal(cols.values, joined.values)
def test_slice_keeps_name(self):
# GH4226
st = pd.Timestamp('2013-07-01 00:00:00', tz='America/Los_Angeles')
et = pd.Timestamp('2013-07-02 00:00:00', tz='America/Los_Angeles')
dr = pd.date_range(st, et, freq='H', name='timebucket')
self.assertEqual(dr[1:].name, dr.name)
def test_join_self(self):
index = date_range('1/1/2000', periods=10)
kinds = 'outer', 'inner', 'left', 'right'
for kind in kinds:
joined = index.join(index, how=kind)
self.assertIs(index, joined)
def assert_index_parameters(self, index):
assert index.freq == '40960N'
assert index.inferred_freq == '40960N'
def test_ns_index(self):
if _np_version_under1p7:
raise nose.SkipTest
nsamples = 400
ns = int(1e9 / 24414)
dtstart = np.datetime64('2012-09-20T00:00:00')
dt = dtstart + np.arange(nsamples) * np.timedelta64(ns, 'ns')
freq = ns * pd.datetools.Nano()
index = pd.DatetimeIndex(dt, freq=freq, name='time')
self.assert_index_parameters(index)
new_index = pd.DatetimeIndex(start=index[0], end=index[-1], freq=index.freq)
self.assert_index_parameters(new_index)
def test_join_with_period_index(self):
df = tm.makeCustomDataframe(10, 10, data_gen_f=lambda *args:
np.random.randint(2), c_idx_type='p',
r_idx_type='dt')
s = df.iloc[:5, 0]
joins = 'left', 'right', 'inner', 'outer'
for join in joins:
with tm.assertRaisesRegexp(ValueError, 'can only call with other '
'PeriodIndex-ed objects'):
df.columns.join(s.index, how=join)
def test_factorize(self):
idx1 = DatetimeIndex(['2014-01', '2014-01', '2014-02',
'2014-02', '2014-03', '2014-03'])
exp_arr = np.array([0, 0, 1, 1, 2, 2])
exp_idx = DatetimeIndex(['2014-01', '2014-02', '2014-03'])
arr, idx = idx1.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
arr, idx = idx1.factorize(sort=True)
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
# tz must be preserved
idx1 = idx1.tz_localize('Asia/Tokyo')
exp_idx = exp_idx.tz_localize('Asia/Tokyo')
arr, idx = idx1.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
idx2 = pd.DatetimeIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'])
exp_arr = np.array([2, 2, 1, 0, 2, 0])
exp_idx = DatetimeIndex(['2014-01', '2014-02', '2014-03'])
arr, idx = idx2.factorize(sort=True)
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
exp_arr = np.array([0, 0, 1, 2, 0, 2])
exp_idx = DatetimeIndex(['2014-03', '2014-02', '2014-01'])
arr, idx = idx2.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
# freq must be preserved
idx3 = date_range('2000-01', periods=4, freq='M', tz='Asia/Tokyo')
exp_arr = np.array([0, 1, 2, 3])
arr, idx = idx3.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(idx3))
class TestDatetime64(tm.TestCase):
"""
Also test support for datetime64[ns] in Series / DataFrame
"""
def setUp(self):
dti = DatetimeIndex(start=datetime(2005, 1, 1),
end=datetime(2005, 1, 10), freq='Min')
self.series = Series(rand(len(dti)), dti)
def test_datetimeindex_accessors(self):
dti = DatetimeIndex(
freq='D', start=datetime(1998, 1, 1), periods=365)
self.assertEqual(dti.year[0], 1998)
self.assertEqual(dti.month[0], 1)
self.assertEqual(dti.day[0], 1)
self.assertEqual(dti.hour[0], 0)
self.assertEqual(dti.minute[0], 0)
self.assertEqual(dti.second[0], 0)
self.assertEqual(dti.microsecond[0], 0)
self.assertEqual(dti.dayofweek[0], 3)
self.assertEqual(dti.dayofyear[0], 1)
self.assertEqual(dti.dayofyear[120], 121)
self.assertEqual(dti.weekofyear[0], 1)
self.assertEqual(dti.weekofyear[120], 18)
self.assertEqual(dti.quarter[0], 1)
self.assertEqual(dti.quarter[120], 2)
self.assertEqual(dti.is_month_start[0], True)
self.assertEqual(dti.is_month_start[1], False)
self.assertEqual(dti.is_month_start[31], True)
self.assertEqual(dti.is_quarter_start[0], True)
self.assertEqual(dti.is_quarter_start[90], True)
self.assertEqual(dti.is_year_start[0], True)
self.assertEqual(dti.is_year_start[364], False)
self.assertEqual(dti.is_month_end[0], False)
self.assertEqual(dti.is_month_end[30], True)
self.assertEqual(dti.is_month_end[31], False)
self.assertEqual(dti.is_month_end[364], True)
self.assertEqual(dti.is_quarter_end[0], False)
self.assertEqual(dti.is_quarter_end[30], False)
self.assertEqual(dti.is_quarter_end[89], True)
self.assertEqual(dti.is_quarter_end[364], True)
self.assertEqual(dti.is_year_end[0], False)
self.assertEqual(dti.is_year_end[364], True)
self.assertEqual(len(dti.year), 365)
self.assertEqual(len(dti.month), 365)
self.assertEqual(len(dti.day), 365)
self.assertEqual(len(dti.hour), 365)
self.assertEqual(len(dti.minute), 365)
self.assertEqual(len(dti.second), 365)
self.assertEqual(len(dti.microsecond), 365)
self.assertEqual(len(dti.dayofweek), 365)
self.assertEqual(len(dti.dayofyear), 365)
self.assertEqual(len(dti.weekofyear), 365)
self.assertEqual(len(dti.quarter), 365)
self.assertEqual(len(dti.is_month_start), 365)
self.assertEqual(len(dti.is_month_end), 365)
self.assertEqual(len(dti.is_quarter_start), 365)
self.assertEqual(len(dti.is_quarter_end), 365)
self.assertEqual(len(dti.is_year_start), 365)
self.assertEqual(len(dti.is_year_end), 365)
dti = DatetimeIndex(
freq='BQ-FEB', start=datetime(1998, 1, 1), periods=4)
self.assertEqual(sum(dti.is_quarter_start), 0)
self.assertEqual(sum(dti.is_quarter_end), 4)
self.assertEqual(sum(dti.is_year_start), 0)
self.assertEqual(sum(dti.is_year_end), 1)
# Ensure is_start/end accessors throw ValueError for CustomBusinessDay, CBD requires np >= 1.7
if not _np_version_under1p7:
bday_egypt = offsets.CustomBusinessDay(weekmask='Sun Mon Tue Wed Thu')
dti = date_range(datetime(2013, 4, 30), periods=5, freq=bday_egypt)
self.assertRaises(ValueError, lambda: dti.is_month_start)
dti = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'])
self.assertEqual(dti.is_month_start[0], 1)
tests = [
(Timestamp('2013-06-01', offset='M').is_month_start, 1),
(Timestamp('2013-06-01', offset='BM').is_month_start, 0),
(Timestamp('2013-06-03', offset='M').is_month_start, 0),
(Timestamp('2013-06-03', offset='BM').is_month_start, 1),
import csv
import pprint
import datetime
import time
import pandas as pd
## Filenames
chicago = 'chicago.csv'
new_york_city = 'new_york_city.csv'
washington = 'washington.csv'
def get_city():
'''Asks the user for a city and returns the filename for that city's bike share data.
Args:
none.
Returns:
(str) Filename for a city's bikeshare data.
'''
city = input('\nHello! Let\'s explore some US bikeshare data!\n'
'Would you like to see data for Chicago, New York, or Washington?\n')
city= city.lower()
if city == 'chicago':
print('Ok, Let\'s explore the data for Chicago!')
return chicago
elif city == 'new york':
print('Ok, Let\'s explore the data for New York!')
return new_york_city
elif city== 'washington':
print('Ok, Let\'s explore the data for Washington')
return washington
else:
print('Sorry, that is not a valid input. Please enter Chicago, New York, or Washington.')
return get_city()
def get_time_period():
'''Asks the user for a time period and returns the specified filter.
Args:
none.
Returns:
(str) Time period for a city's bikeshare data.
'''
time_period = input('\nWould you like to filter the data by month, day, or not at'
' all? Type "none" for no time filter.\n')
if time_period == 'month':
print('Great, let\'s filter the data by month.')
return get_month()
elif time_period == 'day':
print('Great, let\'s filter the data by day.')
return get_day()
elif time_period == 'none':
print('Great, we will not filter the time period.')
return time_period
else:
print('Sorry, that is not a valid input, please try again.')
return get_time_period()
def get_month():
'''Asks the user for a month and returns the specified month.
Args:
none.
Returns:
(str): Returns the month the user chooses.
'''
month = input('\nWhich month? January, February, March, April, May, or June?\n')
month= month.lower()
if month not in ['january', 'february', 'march', 'april','may', 'june']:
print('This input is not valid.')
return get_month()
return month
def get_day():
'''Asks the user for a day of the week as an integer and returns the specified day.
Args:
none.
Returns:
(int): Returns the day of the week that the user chooses (Mon=0 ... Sun=6).
'''
day = input('\nWhich day? Please type your response as an integer. For example Mon=0, Tues=1, etc.\n')
if not day.isdigit() or int(day) not in [0, 1, 2, 3, 4, 5, 6]:
print('This input is not valid.')
return get_day()
day = int(day)
return day
def load_df(city):
'''Loads the bikeshare data for a city into a pandas DataFrame.
Args:
(str) Filename for a city's bikeshare data.
Returns:
(DataFrame) The city's data with the 'Start Time' column parsed as datetimes.
'''
df = pd.read_csv(city)
df['Start Time'] = pd.to_datetime(df['Start Time'])
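# Hypothetical continuation (not part of the original snippet): once
# 'Start Time' is parsed, popular-time statistics typically derive helper
# columns via the pandas .dt accessor, e.g.:
#
#   df['month'] = df['Start Time'].dt.month            # 1-12
#   df['day_of_week'] = df['Start Time'].dt.dayofweek  # Mon=0 ... Sun=6
#   df['hour'] = df['Start Time'].dt.hour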
"""
data_production.py
This script was used to produce the mimic data from the paper; as
noted in the text, this data was specifically curated to be as close
to the biological data as possible using mean vector error. As stated
in the text data is meant to demonstrate that the observed data is
possible under the chosen model, not how likely it is to occur.
"""
from util.treatment import Treatment
from util.models import ReliabilityModel
from util.integration_models import *
import pandas as pd
from world.wind import Wind
from world.light import Light
import os
import numpy as np
import matplotlib.pyplot as plt
def circ_mean(angles):
"""
Compute the circular mean of a list of angles. Standard method
is to compute angles to unit vectors, then take the vector average.
See Batschelet (1981) for more information.
:param angles: List of angles to average.
:return: Tuple (mean resultant length, mean angle in radians).
"""
xs = np.cos(angles)
ys = np.sin(angles)
avg_x = sum(xs) / len(xs)
avg_y = sum(ys) / len(ys)
avg_r = np.sqrt(avg_x**2 + avg_y**2)
avg_t = np.arctan2(avg_y, avg_x)
return (avg_r, avg_t)
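# Illustrative check (not part of the original script): the vector average
# handles the 0/360 degree wrap-around that a plain arithmetic mean misses.
# Angles of 350 and 10 degrees average to roughly 0 degrees with a near-unit
# resultant length:
#
#   r, theta = circ_mean([np.radians(350), np.radians(10)])
#   # r ~= 0.985, theta ~= 0.0 rad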
def polar_euclidean_distance(a,b):
"""
Euclidean distance between two polar vectors (in tuple form)
:param a: Tuple of form (r, theta)
:param b: Tuple of form (r, theta)
:return: Euc distance between a and b
"""
a_cart = (a[0] * np.cos(a[1]),
a[0] * np.sin(a[1]))
b_cart = (b[0] * np.cos(b[1]),
b[0] * np.sin(b[1]))
return np.sqrt(((a_cart[0] - b_cart[0]) ** 2) + ((a_cart[1] - b_cart[1]) ** 2))
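# Quick sanity check (illustrative only): identical polar vectors are zero
# distance apart, while unit vectors pointing in opposite directions are 2.0
# apart:
#
#   polar_euclidean_distance((1.0, 0.0), (1.0, 0.0))    # 0.0
#   polar_euclidean_distance((1.0, 0.0), (1.0, np.pi))  # 2.0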
def cue_weight_conditions(sim):
"""
Generate mimic data for the cue weighting experiments. 1.25 and
2.5m/s wind speed across all elevations and conflicts.
:param sim: The simulator with which to produce the data.
:return: Unused
"""
# Data input; this is the biological data used for approximation.
data = pd.read_csv("data/cue_weight_data.csv")
# Generate all experimental conditions
elevations = [45, 60, 75, 86]
conflicts = [0, 60, 120]
windspeeds = [1.25, 2.5]
treatments = dict()
rel_model = ReliabilityModel()
simulator = sim
#
# Treatment production
#
for wind_speed in windspeeds:
for elevation in elevations:
# Condition doesn't exist so don't generate unnecessary work.
if elevation == 45 and wind_speed == 1.25:
print("Skipping condition {}-{}".format(
wind_speed, elevation
))
continue
for conflict in conflicts:
key = "{:.02f}-{}-{:03d}".format(wind_speed, elevation, conflict)
n = len([x for x in list(data[key]) if not np.isnan(x)])
print("{}:{}".format(key,n))
# Generate treatment for each condition with extremely
# large sample sizes to get a better picture of the
# parent population.
treatment = Treatment()
treatment.set_reliability_model(rel_model)
treatment.set_n(n)
treatment.set_id("E:{};C:{}".format(elevation, conflict))
init_light = Light(np.radians(elevation), np.radians(0), treatment)
init_wind = Wind(wind_speed, np.radians(0), treatment)
initial = [init_wind, init_light]
conf_light = Light(np.radians(elevation), np.radians(0), treatment)
conf_wind = Wind(wind_speed, np.radians(conflict), treatment)
conf= [conf_wind, conf_light]
treatment.set_initial_cues(initial)
treatment.set_conflict_cues(conf)
treatments[key] = treatment
print("Treatment generation complete")
# Move into mimic directory for output storage
os.chdir("mimic_distributions/cue_weight")
print(os.getcwd())
#
# For each treatment, search for a sufficiently similar simulated population
#
for key in treatments.keys():
# Compute the target mean
print("Working on {}".format(key))
condition_data = [x for x in list(data[key]) if not np.isnan(x)]
cond_avg = circ_mean(np.radians(condition_data))
# Simulated summary stats
treatment = treatments[key]
#
# Search for sufficiently low error, break when found.
#
while True:
simulator.simulate_treatment(treatment)
changes = treatment.get_changes_in_bearing()
deg_changes = np.degrees(changes)
rounded_changes = [ int(5 * round(x/5)) for x in deg_changes ]
sim_avg = circ_mean(np.radians(rounded_changes))
error = polar_euclidean_distance(cond_avg, sim_avg)
# Accept once the error is below the threshold, i.e. sufficiently similar
if error < 0.02:
break
#
# Store the simulated population information
#
df = pd.DataFrame()
from datetime import datetime, timedelta
import numpy as np
import pytest
from pandas._libs.tslibs import period as libperiod
import pandas as pd
from pandas import DatetimeIndex, Period, PeriodIndex, Series, notna, period_range
import pandas._testing as tm
class TestGetItem:
def test_ellipsis(self):
# GH#21282
idx = period_range("2011-01-01", "2011-01-31", freq="D", name="idx")
result = idx[...]
assert result.equals(idx)
assert result is not idx
def test_getitem(self):
idx1 = pd.period_range("2011-01-01", "2011-01-31", freq="D", name="idx")
for idx in [idx1]:
result = idx[0]
assert result == pd.Period("2011-01-01", freq="D")
result = idx[-1]
assert result == pd.Period("2011-01-31", freq="D")
result = idx[0:5]
expected = pd.period_range("2011-01-01", "2011-01-05", freq="D", name="idx")
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
result = idx[0:10:2]
expected = pd.PeriodIndex(
["2011-01-01", "2011-01-03", "2011-01-05", "2011-01-07", "2011-01-09"],
freq="D",
name="idx",
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
result = idx[-20:-5:3]
expected = pd.PeriodIndex(
["2011-01-12", "2011-01-15", "2011-01-18", "2011-01-21", "2011-01-24"],
freq="D",
name="idx",
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
result = idx[4::-1]
expected = PeriodIndex(
["2011-01-05", "2011-01-04", "2011-01-03", "2011-01-02", "2011-01-01"],
freq="D",
name="idx",
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
def test_getitem_index(self):
idx = period_range("2007-01", periods=10, freq="M", name="x")
result = idx[[1, 3, 5]]
exp = pd.PeriodIndex(["2007-02", "2007-04", "2007-06"], freq="M", name="x")
tm.assert_index_equal(result, exp)
result = idx[[True, True, False, False, False, True, True, False, False, False]]
exp = pd.PeriodIndex(
["2007-01", "2007-02", "2007-06", "2007-07"], freq="M", name="x"
)
tm.assert_index_equal(result, exp)
def test_getitem_partial(self):
rng = period_range("2007-01", periods=50, freq="M")
ts = Series(np.random.randn(len(rng)), rng)
with pytest.raises(KeyError, match=r"^'2006'$"):
ts["2006"]
result = ts["2008"]
assert (result.index.year == 2008).all()
result = ts["2008":"2009"]
assert len(result) == 24
result = ts["2008-1":"2009-12"]
assert len(result) == 24
result = ts["2008Q1":"2009Q4"]
assert len(result) == 24
result = ts[:"2009"]
assert len(result) == 36
result = ts["2009":]
assert len(result) == 50 - 24
exp = result
result = ts[24:]
tm.assert_series_equal(exp, result)
ts = ts[10:].append(ts[10:])
msg = "left slice bound for non-unique label: '2008'"
with pytest.raises(KeyError, match=msg):
ts[slice("2008", "2009")]
def test_getitem_datetime(self):
rng = period_range(start="2012-01-01", periods=10, freq="W-MON")
ts = Series(range(len(rng)), index=rng)
dt1 = datetime(2011, 10, 2)
dt4 = datetime(2012, 4, 20)
rs = ts[dt1:dt4]
tm.assert_series_equal(rs, ts)
def test_getitem_nat(self):
idx = pd.PeriodIndex(["2011-01", "NaT", "2011-02"], freq="M")
assert idx[0] == pd.Period("2011-01", freq="M")
assert idx[1] is pd.NaT
s = pd.Series([0, 1, 2], index=idx)
assert s[pd.NaT] == 1
s = pd.Series(idx, index=idx)
assert s[pd.Period("2011-01", freq="M")] == pd.Period("2011-01", freq="M")
assert s[pd.NaT] is pd.NaT
def test_getitem_list_periods(self):
# GH 7710
rng = period_range(start="2012-01-01", periods=10, freq="D")
ts = Series(range(len(rng)), index=rng)
exp = ts.iloc[[1]]
tm.assert_series_equal(ts[[Period("2012-01-02", freq="D")]], exp)
def test_getitem_seconds(self):
# GH#6716
didx = pd.date_range(start="2013/01/01 09:00:00", freq="S", periods=4000)
pidx = period_range(start="2013/01/01 09:00:00", freq="S", periods=4000)
for idx in [didx, pidx]:
# getitem against index should raise ValueError
values = [
"2014",
"2013/02",
"2013/01/02",
"2013/02/01 9H",
"2013/02/01 09:00",
]
for v in values:
# GH7116
# these show deprecations as we are trying
# to slice with non-integer indexers
# with pytest.raises(IndexError):
# idx[v]
continue
s = Series(np.random.rand(len(idx)), index=idx)
tm.assert_series_equal(s["2013/01/01 10:00"], s[3600:3660])
tm.assert_series_equal(s["2013/01/01 9H"], s[:3600])
for d in ["2013/01/01", "2013/01", "2013"]:
tm.assert_series_equal(s[d], s)
def test_getitem_day(self):
# GH#6716
# Confirm DatetimeIndex and PeriodIndex works identically
didx = pd.date_range(start="2013/01/01", freq="D", periods=400)
pidx = period_range(start="2013/01/01", freq="D", periods=400)
for idx in [didx, pidx]:
# getitem against index should raise ValueError
values = [
"2014",
"2013/02",
"2013/01/02",
"2013/02/01 9H",
"2013/02/01 09:00",
]
for v in values:
# GH7116
# these show deprecations as we are trying
# to slice with non-integer indexers
# with pytest.raises(IndexError):
# idx[v]
continue
s = Series(np.random.rand(len(idx)), index=idx)
tm.assert_series_equal(s["2013/01"], s[0:31])
tm.assert_series_equal(s["2013/02"], s[31:59])
tm.assert_series_equal(s["2014"], s[365:])
invalid = ["2013/02/01 9H", "2013/02/01 09:00"]
for v in invalid:
with pytest.raises(KeyError, match=v):
s[v]
class TestWhere:
@pytest.mark.parametrize("klass", [list, tuple, np.array, Series])
def test_where(self, klass):
i = period_range("20130101", periods=5, freq="D")
cond = [True] * len(i)
expected = i
result = i.where(klass(cond))
tm.assert_index_equal(result, expected)
cond = [False] + [True] * (len(i) - 1)
expected = PeriodIndex([pd.NaT] + i[1:].tolist(), freq="D")
result = i.where(klass(cond))
tm.assert_index_equal(result, expected)
def test_where_other(self):
i = period_range("20130101", periods=5, freq="D")
for arr in [np.nan, pd.NaT]:
result = i.where(notna(i), other=np.nan)
expected = i
tm.assert_index_equal(result, expected)
i2 = i.copy()
i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + i[2:].tolist(), freq="D")
result = i.where(notna(i2), i2)
tm.assert_index_equal(result, i2)
i2 = i.copy()
i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + i[2:].tolist(), freq="D")
result = i.where(notna(i2), i2.values)
tm.assert_index_equal(result, i2)
def test_where_invalid_dtypes(self):
pi = period_range("20130101", periods=5, freq="D")
i2 = pi.copy()
i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + pi[2:].tolist(), freq="D")
with pytest.raises(TypeError, match="Where requires matching dtype"):
pi.where(notna(i2), i2.asi8)
with pytest.raises(TypeError, match="Where requires matching dtype"):
pi.where(notna(i2), i2.asi8.view("timedelta64[ns]"))
with pytest.raises(TypeError, match="Where requires matching dtype"):
pi.where(notna(i2), i2.to_timestamp("S"))
class TestTake:
def test_take(self):
# GH#10295
idx1 = pd.period_range("2011-01-01", "2011-01-31", freq="D", name="idx")
for idx in [idx1]:
result = idx.take([0])
assert result == pd.Period("2011-01-01", freq="D")
result = idx.take([5])
assert result == pd.Period("2011-01-06", freq="D")
result = idx.take([0, 1, 2])
expected = pd.period_range("2011-01-01", "2011-01-03", freq="D", name="idx")
tm.assert_index_equal(result, expected)
assert result.freq == "D"
assert result.freq == expected.freq
result = idx.take([0, 2, 4])
expected = pd.PeriodIndex(
["2011-01-01", "2011-01-03", "2011-01-05"], freq="D", name="idx"
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
result = idx.take([7, 4, 1])
expected = pd.PeriodIndex(
["2011-01-08", "2011-01-05", "2011-01-02"], freq="D", name="idx"
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
result = idx.take([3, 2, 5])
expected = PeriodIndex(
["2011-01-04", "2011-01-03", "2011-01-06"], freq="D", name="idx"
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
result = idx.take([-3, 2, 5])
expected = PeriodIndex(
["2011-01-29", "2011-01-03", "2011-01-06"], freq="D", name="idx"
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
def test_take_misc(self):
index = period_range(start="1/1/10", end="12/31/12", freq="D", name="idx")
expected = PeriodIndex(
[
datetime(2010, 1, 6),
datetime(2010, 1, 7),
datetime(2010, 1, 9),
datetime(2010, 1, 13),
],
freq="D",
name="idx",
)
taken1 = index.take([5, 6, 8, 12])
taken2 = index[[5, 6, 8, 12]]
for taken in [taken1, taken2]:
tm.assert_index_equal(taken, expected)
assert isinstance(taken, PeriodIndex)
assert taken.freq == index.freq
assert taken.name == expected.name
def test_take_fill_value(self):
# GH#12631
idx = pd.PeriodIndex(
["2011-01-01", "2011-02-01", "2011-03-01"], name="xxx", freq="D"
)
result = idx.take(np.array([1, 0, -1]))
expected = pd.PeriodIndex(
["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx", freq="D"
)
tm.assert_index_equal(result, expected)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
expected = pd.PeriodIndex(
["2011-02-01", "2011-01-01", "NaT"], name="xxx", freq="D"
)
tm.assert_index_equal(result, expected)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True)
expected = pd.PeriodIndex(
["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx", freq="D"
)
tm.assert_index_equal(result, expected)
msg = (
"When allow_fill=True and fill_value is not None, "
"all indices must be >= -1"
)
with pytest.raises(ValueError, match=msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with pytest.raises(ValueError, match=msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
msg = "index -5 is out of bounds for( axis 0 with)? size 3"
with pytest.raises(IndexError, match=msg):
idx.take(np.array([1, -5]))
class TestIndexing:
def test_get_loc_msg(self):
idx = period_range("2000-1-1", freq="A", periods=10)
bad_period = Period("2012", "A")
with pytest.raises(KeyError, match=r"^Period\('2012', 'A-DEC'\)$"):
idx.get_loc(bad_period)
try:
idx.get_loc(bad_period)
except KeyError as inst:
assert inst.args[0] == bad_period
def test_get_loc_nat(self):
didx = DatetimeIndex(["2011-01-01", "NaT", "2011-01-03"])
pidx = PeriodIndex(["2011-01-01", "NaT", "2011-01-03"], freq="M")
# check DatetimeIndex compat
for idx in [didx, pidx]:
assert idx.get_loc(pd.NaT) == 1
assert idx.get_loc(None) == 1
assert idx.get_loc(float("nan")) == 1
assert idx.get_loc(np.nan) == 1
def test_get_loc(self):
# GH 17717
p0 = pd.Period("2017-09-01")
p1 = pd.Period("2017-09-02")
p2 = pd.Period("2017-09-03")
# get the location of p1/p2 from
# monotonic increasing PeriodIndex with non-duplicate
idx0 = pd.PeriodIndex([p0, p1, p2])
expected_idx1_p1 = 1
expected_idx1_p2 = 2
assert idx0.get_loc(p1) == expected_idx1_p1
assert idx0.get_loc(str(p1)) == expected_idx1_p1
assert idx0.get_loc(p2) == expected_idx1_p2
assert idx0.get_loc(str(p2)) == expected_idx1_p2
msg = "Cannot interpret 'foo' as period"
with pytest.raises(KeyError, match=msg):
idx0.get_loc("foo")
with pytest.raises(KeyError, match=r"^1\.1$"):
idx0.get_loc(1.1)
msg = (
r"'PeriodIndex\(\['2017-09-01', '2017-09-02', '2017-09-03'\],"
r" dtype='period\[D\]', freq='D'\)' is an invalid key"
)
with pytest.raises(TypeError, match=msg):
idx0.get_loc(idx0)
# get the location of p1/p2 from
# monotonic increasing PeriodIndex with duplicate
idx1 = pd.PeriodIndex([p1, p1, p2])
expected_idx1_p1 = slice(0, 2)
expected_idx1_p2 = 2
assert idx1.get_loc(p1) == expected_idx1_p1
assert idx1.get_loc(str(p1)) == expected_idx1_p1
assert idx1.get_loc(p2) == expected_idx1_p2
assert idx1.get_loc(str(p2)) == expected_idx1_p2
msg = "Cannot interpret 'foo' as period"
with pytest.raises(KeyError, match=msg):
idx1.get_loc("foo")
with pytest.raises(KeyError, match=r"^1\.1$"):
idx1.get_loc(1.1)
msg = (
r"'PeriodIndex\(\['2017-09-02', '2017-09-02', '2017-09-03'\],"
r" dtype='period\[D\]', freq='D'\)' is an invalid key"
)
with pytest.raises(TypeError, match=msg):
idx1.get_loc(idx1)
# get the location of p1/p2 from
# non-monotonic increasing/decreasing PeriodIndex with duplicate
idx2 = pd.PeriodIndex([p2, p1, p2])
expected_idx2_p1 = 1
expected_idx2_p2 = np.array([True, False, True])
assert idx2.get_loc(p1) == expected_idx2_p1
assert idx2.get_loc(str(p1)) == expected_idx2_p1
tm.assert_numpy_array_equal(idx2.get_loc(p2), expected_idx2_p2)
tm.assert_numpy_array_equal(idx2.get_loc(str(p2)), expected_idx2_p2)
def test_is_monotonic_increasing(self):
# GH 17717
p0 = pd.Period("2017-09-01")
p1 = pd.Period("2017-09-02")
p2 = pd.Period("2017-09-03")
idx_inc0 = pd.PeriodIndex([p0, p1, p2])
idx_inc1 = | pd.PeriodIndex([p0, p1, p1]) | pandas.PeriodIndex |
"""
Tests for Timestamp timezone-related methods
"""
from datetime import (
date,
datetime,
timedelta,
)
import dateutil
from dateutil.tz import (
gettz,
tzoffset,
)
import pytest
import pytz
from pytz.exceptions import (
AmbiguousTimeError,
NonExistentTimeError,
)
from pandas._libs.tslibs import timezones
from pandas.errors import OutOfBoundsDatetime
import pandas.util._test_decorators as td
from pandas import (
NaT,
Timestamp,
)
class TestTimestampTZOperations:
# --------------------------------------------------------------
# Timestamp.tz_localize
def test_tz_localize_pushes_out_of_bounds(self):
# GH#12677
# tz_localize that pushes away from the boundary is OK
msg = (
f"Converting {Timestamp.min.strftime('%Y-%m-%d %H:%M:%S')} "
f"underflows past {Timestamp.min}"
)
pac = Timestamp.min.tz_localize("US/Pacific")
assert pac.value > Timestamp.min.value
pac.tz_convert("Asia/Tokyo") # tz_convert doesn't change value
with pytest.raises(OutOfBoundsDatetime, match=msg):
Timestamp.min.tz_localize("Asia/Tokyo")
# tz_localize that pushes away from the boundary is OK
msg = (
f"Converting {Timestamp.max.strftime('%Y-%m-%d %H:%M:%S')} "
f"overflows past {Timestamp.max}"
)
tokyo = Timestamp.max.tz_localize("Asia/Tokyo")
assert tokyo.value < Timestamp.max.value
tokyo.tz_convert("US/Pacific") # tz_convert doesn't change value
with pytest.raises(OutOfBoundsDatetime, match=msg):
Timestamp.max.tz_localize("US/Pacific")
def test_tz_localize_ambiguous_bool(self):
# make sure that we are correctly accepting bool values as ambiguous
# GH#14402
ts = Timestamp("2015-11-01 01:00:03")
expected0 = Timestamp("2015-11-01 01:00:03-0500", tz="US/Central")
expected1 = Timestamp("2015-11-01 01:00:03-0600", tz="US/Central")
msg = "Cannot infer dst time from 2015-11-01 01:00:03"
with pytest.raises(pytz.AmbiguousTimeError, match=msg):
ts.tz_localize("US/Central")
result = ts.tz_localize("US/Central", ambiguous=True)
assert result == expected0
result = ts.tz_localize("US/Central", ambiguous=False)
assert result == expected1
def test_tz_localize_ambiguous(self):
ts = Timestamp("2014-11-02 01:00")
ts_dst = ts.tz_localize("US/Eastern", ambiguous=True)
ts_no_dst = ts.tz_localize("US/Eastern", ambiguous=False)
assert (ts_no_dst.value - ts_dst.value) / 1e9 == 3600
msg = "Cannot infer offset with only one time"
with pytest.raises(ValueError, match=msg):
ts.tz_localize("US/Eastern", ambiguous="infer")
# GH#8025
msg = "Cannot localize tz-aware Timestamp, use tz_convert for conversions"
with pytest.raises(TypeError, match=msg):
Timestamp("2011-01-01", tz="US/Eastern").tz_localize("Asia/Tokyo")
msg = "Cannot convert tz-naive Timestamp, use tz_localize to localize"
with pytest.raises(TypeError, match=msg):
Timestamp("2011-01-01").tz_convert("Asia/Tokyo")
@pytest.mark.parametrize(
"stamp, tz",
[
("2015-03-08 02:00", "US/Eastern"),
("2015-03-08 02:30", "US/Pacific"),
("2015-03-29 02:00", "Europe/Paris"),
("2015-03-29 02:30", "Europe/Belgrade"),
],
)
def test_tz_localize_nonexistent(self, stamp, tz):
# GH#13057
ts = Timestamp(stamp)
with pytest.raises(NonExistentTimeError, match=stamp):
ts.tz_localize(tz)
# GH 22644
with pytest.raises(NonExistentTimeError, match=stamp):
ts.tz_localize(tz, nonexistent="raise")
assert ts.tz_localize(tz, nonexistent="NaT") is NaT
def test_tz_localize_ambiguous_raise(self):
# GH#13057
ts = Timestamp("2015-11-1 01:00")
msg = "Cannot infer dst time from 2015-11-01 01:00:00,"
with pytest.raises(AmbiguousTimeError, match=msg):
ts.tz_localize("US/Pacific", ambiguous="raise")
def test_tz_localize_nonexistent_invalid_arg(self):
# GH 22644
tz = "Europe/Warsaw"
ts = Timestamp("2015-03-29 02:00:00")
msg = (
"The nonexistent argument must be one of 'raise', 'NaT', "
"'shift_forward', 'shift_backward' or a timedelta object"
)
with pytest.raises(ValueError, match=msg):
ts.tz_localize(tz, nonexistent="foo")
@pytest.mark.parametrize(
"stamp",
[
"2014-02-01 09:00",
"2014-07-08 09:00",
"2014-11-01 17:00",
"2014-11-05 00:00",
],
)
def test_tz_localize_roundtrip(self, stamp, tz_aware_fixture):
tz = tz_aware_fixture
ts = Timestamp(stamp)
localized = ts.tz_localize(tz)
assert localized == Timestamp(stamp, tz=tz)
msg = "Cannot localize tz-aware Timestamp"
with pytest.raises(TypeError, match=msg):
localized.tz_localize(tz)
reset = localized.tz_localize(None)
assert reset == ts
assert reset.tzinfo is None
def test_tz_localize_ambiguous_compat(self):
# validate that pytz and dateutil are compat for dst
# when the transition happens
naive = Timestamp("2013-10-27 01:00:00")
pytz_zone = "Europe/London"
dateutil_zone = "dateutil/Europe/London"
result_pytz = naive.tz_localize(pytz_zone, ambiguous=0)
result_dateutil = naive.tz_localize(dateutil_zone, ambiguous=0)
assert result_pytz.value == result_dateutil.value
assert result_pytz.value == 1382835600000000000
# fixed ambiguous behavior
# see gh-14621
assert result_pytz.to_pydatetime().tzname() == "GMT"
assert result_dateutil.to_pydatetime().tzname() == "BST"
assert str(result_pytz) == str(result_dateutil)
# 1 hour difference
result_pytz = naive.tz_localize(pytz_zone, ambiguous=1)
result_dateutil = naive.tz_localize(dateutil_zone, ambiguous=1)
assert result_pytz.value == result_dateutil.value
assert result_pytz.value == 1382832000000000000
# see gh-14621
assert str(result_pytz) == str(result_dateutil)
assert (
result_pytz.to_pydatetime().tzname()
== result_dateutil.to_pydatetime().tzname()
)
@pytest.mark.parametrize(
"tz",
[
pytz.timezone("US/Eastern"),
gettz("US/Eastern"),
"US/Eastern",
"dateutil/US/Eastern",
],
)
def test_timestamp_tz_localize(self, tz):
stamp = Timestamp("3/11/2012 04:00")
result = stamp.tz_localize(tz)
expected = Timestamp("3/11/2012 04:00", tz=tz)
assert result.hour == expected.hour
assert result == expected
@pytest.mark.parametrize(
"start_ts, tz, end_ts, shift",
[
["2015-03-29 02:20:00", "Europe/Warsaw", "2015-03-29 03:00:00", "forward"],
[
"2015-03-29 02:20:00",
"Europe/Warsaw",
"2015-03-29 01:59:59.999999999",
"backward",
],
[
"2015-03-29 02:20:00",
"Europe/Warsaw",
"2015-03-29 03:20:00",
timedelta(hours=1),
],
[
"2015-03-29 02:20:00",
"Europe/Warsaw",
"2015-03-29 01:20:00",
timedelta(hours=-1),
],
["2018-03-11 02:33:00", "US/Pacific", "2018-03-11 03:00:00", "forward"],
[
"2018-03-11 02:33:00",
"US/Pacific",
"2018-03-11 01:59:59.999999999",
"backward",
],
[
"2018-03-11 02:33:00",
"US/Pacific",
"2018-03-11 03:33:00",
timedelta(hours=1),
],
[
"2018-03-11 02:33:00",
"US/Pacific",
"2018-03-11 01:33:00",
timedelta(hours=-1),
],
],
)
@pytest.mark.parametrize("tz_type", ["", "dateutil/"])
def test_timestamp_tz_localize_nonexistent_shift(
self, start_ts, tz, end_ts, shift, tz_type
):
# GH 8917, 24466
tz = tz_type + tz
if isinstance(shift, str):
shift = "shift_" + shift
ts = | Timestamp(start_ts) | pandas.Timestamp |
import datetime
import time
import pandas as pd
import numpy as np
import tensorflow as tf
import random as rn
import os
import keras
from keras import Input
from keras.models import Sequential, Model
from keras.layers import concatenate
from keras.layers import Dense
from keras.layers import LSTM, Dropout
from keras.callbacks import EarlyStopping
from keras.callbacks import ReduceLROnPlateau
from keras.callbacks import ModelCheckpoint
from keras.models import load_model
from keras import regularizers
import keras as k
os.environ['PYTHONHASHSEED'] = '0'
np.random.seed(42)
rn.seed(12345)
# Restricting operation to 1 thread for reproducible results.
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
from keras import backend as K
# Setting the graph-level random seed.
tf.set_random_seed(1234)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
def read_data(filename):
df = pd.read_csv(filename, header=None)
df.columns = ['Time', 'PV']
df['Time'] = pd.to_datetime(df["Time"], errors='coerce')
df.index = df["Time"]
df = df.drop(columns=['Time'])
print(df.head())
return df
def pre_processing_data(real_file, hist_file):
df = pd.read_csv(real_file, header=None)
df.columns = ['Time', 'Values']
df['Time'] = pd.to_datetime(df["Time"], errors='coerce')
df.index = df["Time"]
df = df.drop(columns=['Time'])
print("read csv")
print(df.head())
#Changing Frequency of Data to Minutes
df = df.resample('T').mean()
#checking for null values and if any, replacing them with last valid observation
df.isnull().sum()
df.Values.fillna(method='pad', inplace=True)
data = df.values.reshape(-1, 1)
flat_list = [item for sublist in data for item in sublist]
#Quantile Normalization
s = pd.Series(flat_list)
quant = s.quantile(0.75)
Xmin = np.amin(data)
Xmax = quant
X_std = (data - Xmin) / (Xmax - Xmin)
max = 1
min = 0
X_scaled = X_std * (max - min) + min
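    # Because Xmax is the 75th percentile rather than the true maximum, readings
    # above that quantile map to values > 1; this keeps occasional spikes from
    # compressing the useful part of the 0-1 range fed to the network.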
hist_data = []
start_date_hist = datetime.datetime.strptime("2016-01-01 00:00:00", "%Y-%m-%d %H:%M:%S")
with open(hist_file, "r") as f:
data = f.readlines()
data.insert(0, data[-1])
for v in data:
hist_data.append([start_date_hist.strftime("%Y-%m-%d %H:%M:%S"), float(v)])
start_date_hist += datetime.timedelta(hours=1)
hd = pd.DataFrame(hist_data, columns=['Time', 'Values'])
hd['Time'] = pd.to_datetime(hd["Time"], errors='coerce')
hd.index = hd["Time"]
hd = hd.drop(columns=['Time'])
print(hd.head(20))
data = hd.values.reshape(-1, 1)
Xmin = np.amin(data)
Xmax = np.amax(data)
X_std = (data - Xmin) / (Xmax - Xmin)
max = 1
min = 0
X_scaled_hist = X_std * (max - min) + min
return X_scaled, df, X_scaled_hist, hd
def train_model(realXtrain, histXtrain, Ytrain, model, input_size_real, input_size_hist, hidden_size, batch_size,
output_size, Num_Epochs):
#Creating LSTM's structure
if model is None:
print("Training the model..........")
real_input = Input(batch_shape=(batch_size, input_size_real, 1), name="real")
real_features = LSTM(hidden_size, stateful=True, return_sequences=True)(real_input)
hist_input = Input(batch_shape=(batch_size, input_size_hist, 1), name="hist")
hist_features = LSTM(hidden_size, stateful=True, return_sequences=True)(hist_input)
x = concatenate([real_features, hist_features], axis=1)
x = Dropout(0.3)(x)
x = LSTM(hidden_size, stateful=True)(x)
output_layer = Dense(output_size)(x)
model = Model(inputs=[real_input, hist_input], outputs=output_layer)
model.summary()
adam = k.optimizers.Adam(lr=0.01)
model.compile(loss="mean_squared_error", optimizer=adam,
metrics=["mean_squared_error"])
# define reduceLROnPlateau and early stopping callback
reduce_lr = ReduceLROnPlateau(monitor='loss', factor=0.2,
patience=3, min_lr=0.001)
earlystop = EarlyStopping(monitor='loss', min_delta=0.0001, patience=3, verbose=1, mode='auto')
# define the checkpoint
filepath = "model.h5"
checkpoint = ModelCheckpoint(filepath, monitor='loss', verbose=0, save_best_only=True, mode='min')
callbacks_list = [reduce_lr,earlystop,checkpoint]
#Training a stateful LSTM
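    # Each epoch is run as a separate fit() call with shuffle=False, so the LSTM
    # cell state carries across batches within an epoch; reset_states() then
    # clears it between epochs (the usual pattern for stateful Keras LSTMs).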
for i in range(Num_Epochs):
print("Epoch {:d}/{:d}".format(i+1, Num_Epochs))
model.fit({"real": realXtrain, "hist": histXtrain}, Ytrain, batch_size=Batch_Size, epochs=1, verbose=2, callbacks=callbacks_list, shuffle=False)
model.reset_states()
return model
def predict_model(model, realXtest, histXtest, Batch_Size):
#Predicting for the test data
start_time = time.clock()
pred = model.predict({"real": realXtest, "hist": histXtest},batch_size=Batch_Size)
end_time = time.clock()
time_taken = end_time - start_time
return pred[0], time_taken
def find_nearest_hour_index(t):
start_date_hist = datetime.datetime.strptime("2016-01-01 00:00:00", "%Y-%m-%d %H:%M:%S")
if t.minute > 30:
t = t.replace(year=2016, minute=0, second=0, microsecond=0) + datetime.timedelta(hours=1)
else:
t = t.replace(year=2016, minute=0, second=0, microsecond=0)
index = int((t - start_date_hist).total_seconds()/3600)
return index
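# Example (hypothetical values): a live reading stamped 2019-05-03 14:40 rounds up
# to 15:00, is mapped onto the 2016 reference year, and the returned index is the
# number of whole hours since 2016-01-01 00:00, i.e. the row of the hourly
# historical series that corresponds to that time of year.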
def incremental_algorithm(X_scaled, df, X_scaled_hist, Hist_input_size, look_back, Hidden_Size, Batch_Size, Num_Epochs):
num_features = 1
prediction_horizon = 1440
nb_samples = X_scaled.shape[0] - look_back - prediction_horizon
x_train_reshaped = np.zeros((nb_samples, look_back, num_features))
y_train_reshaped = np.zeros((nb_samples, prediction_horizon))
print("----", X_scaled.shape[0])
print("initial X",x_train_reshaped.shape)
print("initial Y",y_train_reshaped.shape)
train_time = []
prediction_time = []
prediction_error = []
prediction_median = []
prediction_std = []
for i in range(nb_samples):
start_date_index = find_nearest_hour_index(datetime.datetime.strptime(str(df.index[i]), "%Y-%m-%d %H:%M:%S"))
end_date_index = start_date_index + Hist_input_size
histXtrain = X_scaled_hist[start_date_index:end_date_index]
        if end_date_index >= len(X_scaled_hist):
            # wrap around to the start of the reference year for the remaining hours
            histXtrain = np.concatenate(
                [histXtrain, X_scaled_hist[0:end_date_index - len(X_scaled_hist)]]
            )
histXtrain = np.reshape(histXtrain, (1,) + histXtrain.shape)
print("hist shape "+str(histXtrain.shape))
y_position = i + look_back
y_position_end = y_position + prediction_horizon
x_train_reshaped[i] = X_scaled[i:y_position]
y__re = X_scaled[y_position:y_position_end]
y_train_reshaped[i] = [item for sublist in y__re for item in sublist]
realXtrain = np.reshape(x_train_reshaped[i], (1,) + x_train_reshaped[i].shape)
ytrain = np.reshape(y_train_reshaped[i], (1,) + y_train_reshaped[i].shape)
print("realX train shape : "+str(realXtrain.shape))
start_time = time.clock()
if i == 0:
trained_model = train_model(realXtrain, histXtrain, ytrain, None, look_back, Hist_input_size, Hidden_Size, Batch_Size,
prediction_horizon, Num_Epochs)
else:
trained_model = train_model(realXtrain, histXtrain, ytrain, trained_model, look_back, Hist_input_size, Hidden_Size, Batch_Size,
prediction_horizon, Num_Epochs)
end_time = time.clock()
time_taken = end_time - start_time
predicted_value, predTime = predict_model(trained_model, realXtrain, histXtrain, Batch_Size)
error = abs(ytrain[0] - predicted_value)
error_median = np.median(error)
error_std = np.std(error)
error_mean = np.mean(error)
prediction_median.append(error_median)
prediction_std.append(error_std)
prediction_error.append(error_mean)
train_time.append(time_taken)
prediction_time.append(predTime)
print("The iteration is **** ", i)
return prediction_error, prediction_median, train_time, prediction_time
def post_processing_data(df, prediction_error, prediction_median, train_time, prediction_time):
pred_new_df = df[1440:] # instead of 24 now 1440
new_df_date = pred_new_df[-len(pred_new_df):]
test_act = new_df_date.reset_index()
test_act = test_act.drop('Values', axis =1)
#Adding datetime to prediction error and changing to dataframe
test_predictions_date = pd.DataFrame(prediction_error)
test_predictions_date.columns = ['Values']
test_predictions_date['Time'] = test_act['Time']
#Adding datetime to prediction error median and changing to dataframe
test_predictions_medianError = pd.DataFrame(prediction_median)
test_predictions_medianError.columns = ['Values']
test_predictions_medianError['Time'] = test_act['Time']
print("Average Error is", test_predictions_date['Values'].mean())
#Writing predicitons to a csv file
test_predictions_date.to_csv('MAE_House20.csv')
test_predictions_medianError.to_csv('MedianError_House20.csv')
train_time_date = pd.DataFrame(train_time)
prediction_time_date = | pd.DataFrame(prediction_time) | pandas.DataFrame |
#%%
"""
Reference page:
https://stackoverflow.com/questions/36028759/how-to-open-and-convert-sqlite-database-to-pandas-dataframe
Pull the tables out of the SQLite database, build pandas DataFrames from them, then combine and tidy the result.
"""
import os
import time
import sqlite3
import numpy as np
import pandas as pd
#%%
# Check the data file and its path
# print(os.getcwd())
path=os.listdir('./db')
path[0]
path='./db'+'/'+path[0]
# Create your connection.
cnx = sqlite3.connect(path)
# cnx = sqlite3.connect(".\\df\\stockNo_2330.sqlite")
cursor=cnx.cursor()
cursor.execute('SELECT name FROM sqlite_master WHERE type = "table"')
Tnamelist = cursor.fetchall()
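# Tnamelist is a list of 1-tuples, one per table listed in sqlite_master,
# e.g. [('month_201901',), ('month_201902',)] (hypothetical names; they depend on the file).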
Tdatall=[]
i=Tnamelist[0]
for i in Tnamelist:
print(str(i[0]))
print("SELECT * FROM "+str(i[0]))
df = pd.read_sql_query(("SELECT * FROM "+str(i[0])), cnx)
Tdatall.append(df)
time.sleep(0.2)
cnx.close()
time.sleep(0.5)
dfall = pd.concat(Tdatall)
dfall = dfall.reset_index(drop=True)
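# dfall now stacks every table from the SQLite file into a single DataFrame,
# re-indexed from 0 so the column renaming below operates on one frame.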
# Drop variables that are no longer needed
del Tdatall, Tnamelist, df, i, cursor, path
#%%
# Rename columns to English names
ch = | pd.DataFrame(dfall.columns, columns=['ch']) | pandas.DataFrame |
#! /usr/bin/env python3
import requests
import pandas as pd
import geopandas as gpd
import matplotlib.pyplot as plt
import contextily as ctx
import re
import folium
from folium.features import DivIcon
from matplotlib import cm
from matplotlib.colors import to_hex
from datetime import datetime
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--csvin', help='raw data csv', required=True)
args = parser.parse_args()
datetime_fmt = '%Y-%m-%dT%H%M%SZ'
if 1:
filename = args.csvin
basename = filename[ :filename.rfind('.')]
_, start, stop = basename.split('_')
tstart = datetime.strptime(start, datetime_fmt)
tstop = datetime.strptime(stop, datetime_fmt)
print(tstart, tstop)
df = pd.read_csv(args.csvin, sep=';')
# 'occurredAt', 'networkId', 'type', 'description', 'clientId',
# 'clientDescription', 'deviceSerial', 'deviceName', 'ssidNumber',
# 'ssidName', 'eventData'
print(df.columns)
#print(df)
print(df)
df = df.drop_duplicates()
df.occurredAt = pd.to_datetime(df.occurredAt)
df = df.set_index('occurredAt')
print(df)
print(df.groupby('networkId').count()['deviceSerial'])
print(df.groupby('type').count()['deviceSerial'])
print(df.groupby('deviceSerial').count()['networkId'])
print(df.groupby('description').count()['networkId'])
print(df.groupby('clientId').count()['networkId'])
print(df.groupby(['clientDescription']).count()['networkId'])
print(df.groupby(['clientId', 'clientDescription']).count()['networkId'])
# for cid, dfg in df.groupby(['clientId', 'clientDescription']):
# print(cid)
# print(dfg[['deviceSerial', 'description', 'clientDescription']])
sampling_dt_min = 15
freq = f'{sampling_dt_min}T'
tidx = | pd.date_range(start, stop, freq=freq) | pandas.date_range |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (C) 2021 <NAME>
# Erasmus Medical Center
# Department of Genetic Identification
#
# License: GNU General Public License v3 or later
# A copy of GNU GPL v3 should have been included in this software package in LICENSE.txt.
# NestedCV pipeline for Smoking Microbiome sequencing data
import os
import subprocess
import warnings
import library.MLfunctions as func
import pandas as pd
import numpy as np
from collections import Counter
from argparse import ArgumentParser
from imblearn.over_sampling import SMOTE
from imblearn.over_sampling import ADASYN
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import LabelBinarizer
from sklearn.metrics import make_scorer, classification_report
import matplotlib.pyplot as plt
import seaborn as sns
import joblib
def get_arguments():
parser = ArgumentParser()
parser.add_argument("-i", "--input", dest = "filename_pos", required = True,
help="table taxa species e.g. data/species_intersect.csv. Taxa as columns, sample names as rows", metavar="FILE")
parser.add_argument("-o", "--output", dest = "output_dir", required = True,
help = "output folder name e.g. data/output/", metavar = "PATH")
parser.add_argument("-m", "--metadata", dest = "filename_pheno", required = True,
help="table with metadata in Tab-separated values TSV e.g. data/metadata.tsv", metavar="FILE")
parser.add_argument("-target", "--target", dest = "target", required = True,
help="Target phenotype name based on the column of metadata e.g. smoking", metavar="STRING")
parser.add_argument("-id", "--id", dest = "id", required = True,
help="Column id sample names on the column of metadata e.g. SRR or Samples", metavar="STRING")
parser.add_argument("-t", "--threads", dest="threads", default = 4, type = int,
help="threads to be used during Nested Cross-Validation e.g. 4",
metavar="INT", required=False)
parser.add_argument("-r", "--root-tree", dest="tree_newick", required = False, default = False,
help="Rooted tree in newick format from ASV consensus e.g. data/TADA/taxa_species_tree.nwk",
metavar="FILE")
parser.add_argument("-ml", "--ml-model", required = False, dest = "model", default = False,
help="Machine learning models, if more than one is selected implements NestedCV and\
reports output and prediction estimations using MCC and AUC,\
otherwise you can select one to export as final model (also needs to be selected an augmented data type)\
[LR, KNN, DT, SVMR, SVML, RF, XG] e.g. LR,KNN,RF or e.g. SVML (will generate one model and export)")
parser.add_argument("-a", "--augmented", required = False, dest = "augmented", default = False,
help = "Data type augmented technique, if no type is selected uses all data types\
[DEFAULT, ADASYN_over, ADASYN_both, SMOTE_both, SMOTE_over, TADA] ")
parser.add_argument("-iter", "--iterations", dest = "iterations", required = False,
help="number of iterations [DEFAULT=10]", metavar="INT", type = int, default = 10)
args = parser.parse_args()
return args
def create_tmp_dirs(folder):
if not os.path.isdir(folder):
cmd = 'mkdir '+folder
subprocess.call(cmd, shell=True)
return True
if __name__ == "__main__":
args = get_arguments()
filename_pos = args.filename_pos
output_dir = args.output_dir + "/"
filename_pheno = args.filename_pheno
## optionals
tree_newick = args.tree_newick
methods = args.augmented
estimators = args.model
target = args.target
id_samples = args.id
threads = args.threads
iterations = args.iterations
if methods == False:
methods = ["DEFAULT", "ADASYN_over", "ADASYN_both", "SMOTE_both", "SMOTE_over"]
if tree_newick:
methods.append("TADA")
else:
methods = methods.split(",")
if estimators == False:
estimators = ["LR", "KNN", "DT", "SVMR", "SVML", "RF", "XG"]
else:
estimators = estimators.split(",")
print("-- Running --")
print("Input taxa species: {}".format(filename_pos))
print("Output directory: {}".format(output_dir))
print("Metadata: {}".format(filename_pheno))
print("You selected the following model(s):")
print(" ".join(estimators))
print("You selected the following augmentation method(s):")
print(" ".join(methods))
print("Number of threads: {}".format(threads))
flag_export_model = False
# If true export models and test set from nestedCV
if len(estimators) == 1 == len(methods):
flag_export_model = True
create_tmp_dirs(output_dir)
mcc_test_list = []
auc_test_list = []
pd_all_metrics = pd.DataFrame()
for i in range(iterations):
print(i+1)
df_metadata = pd.read_csv(filename_pheno, index_col = 0, sep="\t")
df_pos = pd.read_csv(filename_pos, index_col=0)
taxas = df_pos.columns
df_pos.columns = list(range(1, len(df_pos.columns)+1))
print(df_metadata.head())
random_samples_test = func.get_random_samples(df_metadata, target, id_samples)
df_pos = df_pos.loc[~df_pos.index.isin(random_samples_test)] # ignore test samples ~
#print(random_samples_test)
df_metadata = df_metadata.loc[df_metadata[id_samples].isin(df_pos.index)]
df_pos = df_pos.loc[df_metadata[id_samples]]
lb = LabelBinarizer()
y_labels = df_metadata[target].values
y = lb.fit_transform(y_labels).flatten()
X = df_pos.values
for choose in methods:
for estimator in estimators:
pd_model = pd.DataFrame()
list_labels = []
auc_kfold = []
mcc_kfold = []
val_loss_list = []
val_acc_list = []
y_probs_max_list = []
accuracy_list = []
cv_splits = 5 # outer loop
inner_cv_splits = 2
fold = 0
y_probs = np.array(0)
list_names_kfold = []
df_activations = pd.DataFrame()
kfold = StratifiedKFold(n_splits=cv_splits, shuffle=True)
print("############################################")
print("Augmentation: {} ML Model: {}".format(choose, estimator))
print("############################################")
metrics_models = {}
scores_mcc = []
scores_auc = []
for index_train, index_test in kfold.split(X, y):
print("------------------")
fold+=1
print(f"Fold #{fold}")
X_train = X[index_train]
y_train = y[index_train]
X_test = X[index_test]
y_test = y[index_test]
if choose == "DEFAULT":
pass
if choose == "TADA":
obs_ids = [str(i) for i in df_pos.columns]
X_train, y_train = func.tada_augmentation(X_train, y_train, obs_ids, fold, tree_newick)
elif choose == "ADASYN_over":
ada = ADASYN(sampling_strategy='minority', n_neighbors = 5, n_jobs = threads)
X_train, y_train = ada.fit_resample(X_train,y_train)
elif choose == "SMOTE_both":
X_train, y_train = func.smote_both(X_train, y_train)
elif choose == "ADASYN_both":
X_train, y_train = func.adasyn_both(X_train, y_train)
elif choose == "SMOTE_over":
X_train, y_train = SMOTE().fit_resample(X_train, y_train)
X_train = func.total_sum( | pd.DataFrame(X_train) | pandas.DataFrame |
from sales_analysis.data_pipeline import BASEPATH
from sales_analysis.data_pipeline._pipeline import SalesPipeline
import pytest
import os
import pandas as pd
# --------------------------------------------------------------------------
# Fixtures
@pytest.fixture
def pipeline():
FILEPATH = os.path.join(BASEPATH, "data")
DATA_FILES = [f for f in os.listdir(FILEPATH) if f.endswith('.csv')]
DATA = {f : pd.read_csv(os.path.join(FILEPATH, f)) for f in DATA_FILES}
return SalesPipeline(**DATA)
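# The fixture reads every CSV under the package's data directory and passes the
# raw frames straight to SalesPipeline, so each test receives a freshly built pipeline.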
# --------------------------------------------------------------------------
# Data
data = {'customers': {pd.Timestamp('2019-08-01 00:00:00'): 9,
pd.Timestamp('2019-08-02 00:00:00'): 10,
pd.Timestamp('2019-08-03 00:00:00'): 10,
pd.Timestamp('2019-08-04 00:00:00'): 10,
pd.Timestamp('2019-08-05 00:00:00'): 9,
pd.Timestamp('2019-08-06 00:00:00'): 9,
pd.Timestamp('2019-08-07 00:00:00'): 10,
pd.Timestamp('2019-08-08 00:00:00'): 8,
pd.Timestamp('2019-08-09 00:00:00'): 5,
pd.Timestamp('2019-08-10 00:00:00'): 5,
pd.Timestamp('2019-08-11 00:00:00'): 10,
pd.Timestamp('2019-08-12 00:00:00'): 10,
pd.Timestamp('2019-08-13 00:00:00'): 6,
pd.Timestamp('2019-08-14 00:00:00'): 7,
pd.Timestamp('2019-08-15 00:00:00'): 10,
pd.Timestamp('2019-08-16 00:00:00'): 8,
pd.Timestamp('2019-08-17 00:00:00'): 7,
pd.Timestamp('2019-08-18 00:00:00'): 9,
pd.Timestamp('2019-08-19 00:00:00'): 5,
pd.Timestamp('2019-08-20 00:00:00'): 5},
'total_discount_amount': {pd.Timestamp('2019-08-01 00:00:00'): 15152814.736907512,
pd.Timestamp('2019-08-02 00:00:00'): 20061245.64408109,
pd.Timestamp('2019-08-03 00:00:00'): 26441693.751396574,
pd.Timestamp('2019-08-04 00:00:00'): 25783015.567048658,
pd.Timestamp('2019-08-05 00:00:00'): 16649773.993076814,
| pd.Timestamp('2019-08-06 00:00:00') | pandas.Timestamp |
import unittest
import pandas as pd
# fix to allow zip_longest on Python 2.X and 3.X
try: # Python 3
from itertools import zip_longest
except ImportError: # Python 2
from itertools import izip_longest as zip_longest
from math import fabs
from mock import patch, sentinel, Mock, MagicMock
from ib.ext.Contract import Contract
from ib.ext.Order import Order
from ib.ext.Execution import Execution
from ib.ext.OrderState import OrderState
from zipline.gens.brokers.ib_broker import IBBroker, TWSConnection
from zipline.testing.fixtures import WithSimParams
from zipline.finance.execution import (StopLimitOrder,
MarketOrder,
StopOrder,
LimitOrder)
from zipline.finance.order import ORDER_STATUS
from zipline.testing.fixtures import (ZiplineTestCase,
WithDataPortal)
@unittest.skip("Failing on CI - Fix later")
class TestIBBroker(WithSimParams,
WithDataPortal,
ZiplineTestCase):
ASSET_FINDER_EQUITY_SIDS = (1, 2)
ASSET_FINDER_EQUITY_SYMBOLS = ("SPY", "XIV")
@staticmethod
def _tws_bars():
with patch('zipline.gens.brokers.ib_broker.TWSConnection.connect'):
tws = TWSConnection("localhost:9999:1111")
tws._add_bar('SPY', 12.4, 10,
pd.to_datetime('2017-09-27 10:30:00', utc=True),
10, 12.401, False)
tws._add_bar('SPY', 12.41, 10,
pd.to_datetime('2017-09-27 10:30:40', utc=True),
20, 12.411, False)
tws._add_bar('SPY', 12.44, 20,
pd.to_datetime('2017-09-27 10:31:10', utc=True),
40, 12.441, False)
tws._add_bar('SPY', 12.74, 5,
pd.to_datetime('2017-09-27 10:37:10', utc=True),
45, 12.741, True)
tws._add_bar('SPY', 12.99, 15,
pd.to_datetime('2017-09-27 12:10:00', utc=True),
60, 12.991, False)
tws._add_bar('XIV', 100.4, 100,
pd.to_datetime('2017-09-27 9:32:00', utc=True),
100, 100.401, False)
tws._add_bar('XIV', 100.41, 100,
pd.to_datetime('2017-09-27 9:32:20', utc=True),
200, 100.411, True)
tws._add_bar('XIV', 100.44, 200,
pd.to_datetime('2017-09-27 9:41:10', utc=True),
400, 100.441, False)
tws._add_bar('XIV', 100.74, 50,
pd.to_datetime('2017-09-27 11:42:10', utc=True),
450, 100.741, False)
return tws.bars
@staticmethod
def _create_contract(symbol):
contract = Contract()
contract.m_symbol = symbol
contract.m_secType = 'STK'
return contract
@staticmethod
def _create_order(action, qty, order_type, limit_price, stop_price):
order = Order()
order.m_action = action
order.m_totalQuantity = qty
order.m_auxPrice = stop_price
order.m_lmtPrice = limit_price
order.m_orderType = order_type
return order
@staticmethod
def _create_order_state(status_):
status = OrderState()
status.m_status = status_
return status
@staticmethod
def _create_exec_detail(order_id, shares, cum_qty, price, avg_price,
exec_time, exec_id):
exec_detail = Execution()
exec_detail.m_orderId = order_id
exec_detail.m_shares = shares
exec_detail.m_cumQty = cum_qty
exec_detail.m_price = price
exec_detail.m_avgPrice = avg_price
exec_detail.m_time = exec_time
exec_detail.m_execId = exec_id
return exec_detail
@patch('zipline.gens.brokers.ib_broker.TWSConnection')
def test_get_spot_value(self, tws):
dt = None # dt is not used in real broker
data_freq = 'minute'
asset = self.asset_finder.retrieve_asset(1)
bars = {'last_trade_price': [12, 10, 11, 14],
'last_trade_size': [1, 2, 3, 4],
'total_volume': [10, 10, 10, 10],
'vwap': [12.1, 10.1, 11.1, 14.1],
'single_trade_flag': [0, 1, 0, 1]}
last_trade_times = [pd.to_datetime('2017-06-16 10:30:00', utc=True),
pd.to_datetime('2017-06-16 10:30:11', utc=True),
pd.to_datetime('2017-06-16 10:30:30', utc=True),
pd.to_datetime('2017-06-17 10:31:9', utc=True)]
index = pd.DatetimeIndex(last_trade_times)
broker = IBBroker(sentinel.tws_uri)
tws.return_value.bars = {asset.symbol: pd.DataFrame(
index=index, data=bars)}
price = broker.get_spot_value(asset, 'price', dt, data_freq)
last_trade = broker.get_spot_value(asset, 'last_traded', dt, data_freq)
open_ = broker.get_spot_value(asset, 'open', dt, data_freq)
high = broker.get_spot_value(asset, 'high', dt, data_freq)
low = broker.get_spot_value(asset, 'low', dt, data_freq)
close = broker.get_spot_value(asset, 'close', dt, data_freq)
volume = broker.get_spot_value(asset, 'volume', dt, data_freq)
# Only the last minute is taken into account, therefore
# the first bar is ignored
assert price == bars['last_trade_price'][-1]
assert last_trade == last_trade_times[-1]
assert open_ == bars['last_trade_price'][1]
assert high == max(bars['last_trade_price'][1:])
assert low == min(bars['last_trade_price'][1:])
assert close == bars['last_trade_price'][-1]
assert volume == sum(bars['last_trade_size'][1:])
def test_get_realtime_bars_produces_correct_df(self):
bars = self._tws_bars()
with patch('zipline.gens.brokers.ib_broker.TWSConnection'):
broker = IBBroker(sentinel.tws_uri)
broker._tws.bars = bars
assets = (self.asset_finder.retrieve_asset(1),
self.asset_finder.retrieve_asset(2))
realtime_history = broker.get_realtime_bars(assets, '1m')
asset_spy = self.asset_finder.retrieve_asset(1)
asset_xiv = self.asset_finder.retrieve_asset(2)
assert asset_spy in realtime_history
assert asset_xiv in realtime_history
spy = realtime_history[asset_spy]
xiv = realtime_history[asset_xiv]
assert list(spy.columns) == ['open', 'high', 'low', 'close', 'volume']
assert list(xiv.columns) == ['open', 'high', 'low', 'close', 'volume']
# There are 159 minutes between the first (XIV @ 2017-09-27 9:32:00)
# and the last bar (SPY @ 2017-09-27 12:10:00)
assert len(realtime_history) == 159
spy_non_na = spy.dropna()
xiv_non_na = xiv.dropna()
assert len(spy_non_na) == 4
assert len(xiv_non_na) == 3
assert spy_non_na.iloc[0].name == pd.to_datetime(
'2017-09-27 10:30:00', utc=True)
assert spy_non_na.iloc[0].open == 12.40
assert spy_non_na.iloc[0].high == 12.41
assert spy_non_na.iloc[0].low == 12.40
assert spy_non_na.iloc[0].close == 12.41
assert spy_non_na.iloc[0].volume == 20
assert spy_non_na.iloc[1].name == pd.to_datetime(
'2017-09-27 10:31:00', utc=True)
assert spy_non_na.iloc[1].open == 12.44
assert spy_non_na.iloc[1].high == 12.44
assert spy_non_na.iloc[1].low == 12.44
assert spy_non_na.iloc[1].close == 12.44
assert spy_non_na.iloc[1].volume == 20
assert spy_non_na.iloc[-1].name == pd.to_datetime(
'2017-09-27 12:10:00', utc=True)
assert spy_non_na.iloc[-1].open == 12.99
assert spy_non_na.iloc[-1].high == 12.99
assert spy_non_na.iloc[-1].low == 12.99
assert spy_non_na.iloc[-1].close == 12.99
assert spy_non_na.iloc[-1].volume == 15
assert xiv_non_na.iloc[0].name == pd.to_datetime(
'2017-09-27 9:32:00', utc=True)
assert xiv_non_na.iloc[0].open == 100.4
assert xiv_non_na.iloc[0].high == 100.41
assert xiv_non_na.iloc[0].low == 100.4
assert xiv_non_na.iloc[0].close == 100.41
assert xiv_non_na.iloc[0].volume == 200
@patch('zipline.gens.brokers.ib_broker.symbol_lookup')
def test_new_order_appears_in_orders(self, symbol_lookup):
with patch('zipline.gens.brokers.ib_broker.TWSConnection.connect'):
broker = IBBroker("localhost:9999:1111", account_id='TEST-123')
broker._tws.nextValidId(0)
asset = self.asset_finder.retrieve_asset(1)
symbol_lookup.return_value = asset
amount = -4
limit_price = 43.1
stop_price = 6
style = StopLimitOrder(limit_price=limit_price, stop_price=stop_price)
order = broker.order(asset, amount, style)
assert len(broker.orders) == 1
assert broker.orders[order.id] == order
assert order.open
assert order.asset == asset
assert order.amount == amount
assert order.limit == limit_price
assert order.stop == stop_price
assert (order.dt - pd.to_datetime('now', utc=True) <
pd.Timedelta('10s'))
@patch('zipline.gens.brokers.ib_broker.symbol_lookup')
def test_orders_loaded_from_open_orders(self, symbol_lookup):
with patch('zipline.gens.brokers.ib_broker.TWSConnection.connect'):
broker = IBBroker("localhost:9999:1111", account_id='TEST-<PASSWORD>')
asset = self.asset_finder.retrieve_asset(1)
symbol_lookup.return_value = asset
ib_order_id = 3
ib_contract = self._create_contract(str(asset.symbol))
action, qty, order_type, limit_price, stop_price = \
'SELL', 40, 'STP LMT', 4.3, 2
ib_order = self._create_order(
action, qty, order_type, limit_price, stop_price)
ib_state = self._create_order_state('PreSubmitted')
broker._tws.openOrder(ib_order_id, ib_contract, ib_order, ib_state)
assert len(broker.orders) == 1
zp_order = list(broker.orders.values())[-1]
assert zp_order.broker_order_id == ib_order_id
assert zp_order.status == ORDER_STATUS.HELD
assert zp_order.open
assert zp_order.asset == asset
assert zp_order.amount == -40
assert zp_order.limit == limit_price
assert zp_order.stop == stop_price
assert (zp_order.dt - pd.to_datetime('now', utc=True) <
pd.Timedelta('10s'))
@patch('zipline.gens.brokers.ib_broker.symbol_lookup')
def test_orders_loaded_from_exec_details(self, symbol_lookup):
with patch('zipline.gens.brokers.ib_broker.TWSConnection.connect'):
broker = IBBroker("localhost:9999:1111", account_id='TEST-123')
asset = self.asset_finder.retrieve_asset(1)
symbol_lookup.return_value = asset
(req_id, ib_order_id, shares, cum_qty,
price, avg_price, exec_time, exec_id) = (7, 3, 12, 40,
12.43, 12.50,
'20160101 14:20', 4)
ib_contract = self._create_contract(str(asset.symbol))
exec_detail = self._create_exec_detail(
ib_order_id, shares, cum_qty, price, avg_price,
exec_time, exec_id)
broker._tws.execDetails(req_id, ib_contract, exec_detail)
assert len(broker.orders) == 1
zp_order = list(broker.orders.values())[-1]
assert zp_order.broker_order_id == ib_order_id
assert zp_order.open
assert zp_order.asset == asset
assert zp_order.amount == -40
assert (zp_order.dt - pd.to_datetime('now', utc=True) <
pd.Timedelta('10s'))
@patch('zipline.gens.brokers.ib_broker.symbol_lookup')
def test_orders_updated_from_order_status(self, symbol_lookup):
with patch('zipline.gens.brokers.ib_broker.TWSConnection.connect'):
broker = IBBroker("localhost:9999:1111", account_id='TEST-123')
broker._tws.nextValidId(0)
# orderStatus calls only work if a respective order has been created
asset = self.asset_finder.retrieve_asset(1)
symbol_lookup.return_value = asset
amount = -4
limit_price = 43.1
stop_price = 6
style = StopLimitOrder(limit_price=limit_price, stop_price=stop_price)
order = broker.order(asset, amount, style)
ib_order_id = order.broker_order_id
status = 'Filled'
filled = 14
remaining = 9
avg_fill_price = 12.4
perm_id = 99
parent_id = 88
last_fill_price = 12.3
client_id = 1111
why_held = ''
broker._tws.orderStatus(ib_order_id,
status, filled, remaining, avg_fill_price,
perm_id, parent_id, last_fill_price, client_id,
why_held)
assert len(broker.orders) == 1
zp_order = list(broker.orders.values())[-1]
assert zp_order.broker_order_id == ib_order_id
assert zp_order.status == ORDER_STATUS.FILLED
assert not zp_order.open
assert zp_order.asset == asset
assert zp_order.amount == amount
assert zp_order.limit == limit_price
assert zp_order.stop == stop_price
assert (zp_order.dt - pd.to_datetime('now', utc=True) <
pd.Timedelta('10s'))
@patch('zipline.gens.brokers.ib_broker.symbol_lookup')
def test_multiple_orders(self, symbol_lookup):
with patch('zipline.gens.brokers.ib_broker.TWSConnection.connect'):
broker = IBBroker("localhost:9999:1111", account_id='TEST-123')
broker._tws.nextValidId(0)
asset = self.asset_finder.retrieve_asset(1)
symbol_lookup.return_value = asset
order_count = 0
for amount, order_style in [
(-112, StopLimitOrder(limit_price=9, stop_price=1)),
(43, LimitOrder(limit_price=10)),
(-99, StopOrder(stop_price=8)),
(-32, MarketOrder())]:
order = broker.order(asset, amount, order_style)
order_count += 1
assert order_count == len(broker.orders)
assert broker.orders[order.id] == order
is_buy = amount > 0
assert order.stop == order_style.get_stop_price(is_buy)
assert order.limit == order_style.get_limit_price(is_buy)
def test_order_ref_serdes(self):
# Even though _creater_order_ref and _parse_order_ref is private
# it is helpful to test as it plays a key role to re-create orders
order = self._create_order("BUY", 66, "STP LMT", 13.4, 44.2)
serialized = IBBroker._create_order_ref(order)
deserialized = IBBroker._parse_order_ref(serialized)
assert deserialized['action'] == order.m_action
assert deserialized['qty'] == order.m_totalQuantity
assert deserialized['order_type'] == order.m_orderType
assert deserialized['limit_price'] == order.m_lmtPrice
assert deserialized['stop_price'] == order.m_auxPrice
assert (deserialized['dt'] - pd.to_datetime('now', utc=True) <
pd.Timedelta('10s'))
@patch('zipline.gens.brokers.ib_broker.symbol_lookup')
def test_transactions_not_created_for_incompl_orders(self, symbol_lookup):
with patch('zipline.gens.brokers.ib_broker.TWSConnection.connect'):
broker = IBBroker("localhost:9999:1111", account_id='TEST-123')
broker._tws.nextValidId(0)
asset = self.asset_finder.retrieve_asset(1)
symbol_lookup.return_value = asset
amount = -4
limit_price = 43.1
stop_price = 6
style = StopLimitOrder(limit_price=limit_price, stop_price=stop_price)
order = broker.order(asset, amount, style)
assert not broker.transactions
assert len(broker.orders) == 1
assert broker.orders[order.id].open
ib_order_id = order.broker_order_id
ib_contract = self._create_contract(str(asset.symbol))
action, qty, order_type, limit_price, stop_price = \
'SELL', 4, 'STP LMT', 4.3, 2
ib_order = self._create_order(
action, qty, order_type, limit_price, stop_price)
ib_state = self._create_order_state('PreSubmitted')
broker._tws.openOrder(ib_order_id, ib_contract, ib_order, ib_state)
broker._tws.orderStatus(ib_order_id, status='Cancelled', filled=0,
remaining=4, avg_fill_price=0.0, perm_id=4,
parent_id=4, last_fill_price=0.0, client_id=32,
why_held='')
assert not broker.transactions
assert len(broker.orders) == 1
assert not broker.orders[order.id].open
broker._tws.orderStatus(ib_order_id, status='Inactive', filled=0,
remaining=4, avg_fill_price=0.0, perm_id=4,
parent_id=4, last_fill_price=0.0,
client_id=1111, why_held='')
assert not broker.transactions
assert len(broker.orders) == 1
assert not broker.orders[order.id].open
@patch('zipline.gens.brokers.ib_broker.symbol_lookup')
def test_transactions_created_for_complete_orders(self, symbol_lookup):
with patch('zipline.gens.brokers.ib_broker.TWSConnection.connect'):
            broker = IBBroker("localhost:9999:1111", account_id='TEST-123')
broker._tws.nextValidId(0)
asset = self.asset_finder.retrieve_asset(1)
symbol_lookup.return_value = asset
order_count = 0
for amount, order_style in [
(-112, StopLimitOrder(limit_price=9, stop_price=1)),
(43, LimitOrder(limit_price=10)),
(-99, StopOrder(stop_price=8)),
(-32, MarketOrder())]:
order = broker.order(asset, amount, order_style)
broker._tws.orderStatus(order.broker_order_id, 'Filled',
filled=int(fabs(amount)), remaining=0,
avg_fill_price=111, perm_id=0, parent_id=1,
last_fill_price=112, client_id=1111,
why_held='')
contract = self._create_contract(str(asset.symbol))
(shares, cum_qty, price, avg_price, exec_time, exec_id) = \
(int(fabs(amount)), int(fabs(amount)), 12.3, 12.31,
pd.to_datetime('now', utc=True), order_count)
exec_detail = self._create_exec_detail(
order.broker_order_id, shares, cum_qty,
price, avg_price, exec_time, exec_id)
broker._tws.execDetails(0, contract, exec_detail)
order_count += 1
assert len(broker.transactions) == order_count
transactions = [tx
for tx in broker.transactions.values()
if tx.order_id == order.id]
assert len(transactions) == 1
assert broker.transactions[exec_id].asset == asset
assert broker.transactions[exec_id].amount == order.amount
assert (broker.transactions[exec_id].dt -
pd.to_datetime('now', utc=True) < | pd.Timedelta('10s') | pandas.Timedelta |
"""
Copyright (c) 2021, Electric Power Research Institute
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of DER-VET nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
"""
DemandResponse.py
This Python class contains methods and attributes specific for service analysis within StorageVet.
"""
from storagevet.ValueStreams.ValueStream import ValueStream
import pandas as pd
import cvxpy as cvx
import numpy as np
from storagevet.ErrorHandling import *
from storagevet.SystemRequirement import Requirement
import storagevet.Library as Lib
SATURDAY = 5
class DemandResponse(ValueStream):
""" Demand response program participation. Each service will be daughters of the ValueStream class.
"""
def __init__(self, params):
""" Generates the objective function, finds and creates constraints.
Args:
params (Dict): input parameters
"""
# generate the generic service object
ValueStream.__init__(self, 'Demand Response', params)
# add dr specific attributes to object
self.days = params['days']
self.length = params.get('length') # length of an event
self.weekend = params['weekend']
self.start_hour = params['program_start_hour']
self.end_hour = params.get('program_end_hour') # last hour of the program
self.day_ahead = params['day_ahead'] # indicates whether event is scheduled in real-time or day ahead
# handle length and end_hour attributes here
self.fill_dr_event_details()
# timeseries data
self.system_load = params['system_load']
self.months = params['dr_months'] == 1
self.cap_commitment = params['dr_cap'] # this is the max capacity a user is willing to provide
# monthly data
self.cap_monthly = params['cap_monthly']
self.cap_price = params['cap_price']
self.ene_price = params['ene_price']
# the following attributes will be used to save values during analysis
self.qc = None
self.qe = None
self.charge_max_constraint = pd.Series()
self.discharge_min_constraint = pd.Series()
self.energy_min_constraint = pd.Series()
self.possible_event_times = None
def fill_dr_event_details(self):
""" Ensure consistency in length and end_hour attributes
This will set the length attribute accordingly if it's None initially
This will set the end_hour attribute accordingly if it's None initially
Will raise an error if both values are not consistent
Note: if both are None an error is raised first in Params class
"""
if isinstance(self.length, str):
TellUser.error(f'Demand Response: the value of event length ({self.length}) is not supported')
raise ModelParameterError(f'Demand Response: the event value of length ({self.length}) is not supported')
if isinstance(self.end_hour, str):
TellUser.error(f'Demand Response: the value of program_end_hour ({self.end_hour}) is not supported')
raise ModelParameterError(f'Demand Response: the value of program_end_hour ({self.end_hour}) is not supported')
if self.length is None:
self.length = self.end_hour - self.start_hour + 1
elif self.end_hour is None:
self.end_hour = self.start_hour + self.length - 1
        # require that LENGTH == END_HOUR - START_HOUR + 1
if self.length != self.end_hour - self.start_hour + 1:
TellUser.error(f'Demand Response: event length ({self.length}) is not program_end_hour ({self.end_hour}) - program_start_hour ({self.start_hour}). '
+ 'This is ambiguous. '
+ 'Please provide either program_end_hour or length for day ahead scheduling')
raise ModelParameterError(f'Demand Response: event length ({self.length}) is not program_end_hour ({self.end_hour}) - program_start_hour ({self.start_hour}). '
+ 'This is ambiguous. '
+ 'Please provide either program_end_hour or length for day ahead scheduling')
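    # Example of the consistency rule above (hypothetical inputs): with
    # program_start_hour=14 and program_end_hour=17, length must be 4
    # (hour-ending 14 through 17); passing length=3 as well would raise
    # ModelParameterError.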
def grow_drop_data(self, years, frequency, load_growth):
""" Update variable that hold timeseries data after adding growth data. These method should be called after
add_growth_data and before the optimization is run.
Args:
years (List): list of years for which analysis will occur on
frequency (str): period frequency of the timeseries data
load_growth (float): percent/ decimal value of the growth rate of loads in this simulation
"""
# timeseries data
self.system_load = Lib.fill_extra_data(self.system_load, years, load_growth, frequency)
self.system_load = Lib.drop_extra_data(self.system_load, years)
self.months = Lib.fill_extra_data(self.months, years, 0, frequency)
self.months = Lib.drop_extra_data(self.months, years)
self.cap_commitment = Lib.fill_extra_data(self.cap_commitment, years, 0, frequency)
self.cap_commitment = Lib.drop_extra_data(self.cap_commitment, years)
# monthly data
self.cap_monthly = Lib.fill_extra_data(self.cap_monthly, years, 0, 'M')
self.cap_monthly = Lib.drop_extra_data(self.cap_monthly, years)
self.cap_price = Lib.fill_extra_data(self.cap_price, years, 0, 'M')
self.cap_price = Lib.drop_extra_data(self.cap_price, years)
self.ene_price = Lib.fill_extra_data(self.ene_price, years, 0, 'M')
self.ene_price = Lib.drop_extra_data(self.ene_price, years)
def calculate_system_requirements(self, der_lst):
""" Calculate the system requirements that must be meet regardless of what other value streams are active
However these requirements do depend on the technology that are active in our analysis
Args:
der_lst (list): list of the initialized DERs in our scenario
"""
max_discharge_possible = self.qualifying_commitment(der_lst, self.length)
if self.day_ahead:
# if events are scheduled the "day ahead", exact time of the event is known and we can plan accordingly
indx_dr_days = self.day_ahead_event_scheduling()
else:
# power reservations instead of absolute constraints
# if events are scheduled the "Day of", or if the start time of the event is uncertain, apply at every possible start
indx_dr_days = self.day_of_event_scheduling()
qc = np.minimum(self.cap_commitment.loc[indx_dr_days].values, max_discharge_possible)
self.qc = pd.Series(qc, index=indx_dr_days, name='DR Discharge Min (kW)')
self.possible_event_times = indx_dr_days
if self.day_ahead:
self.charge_max_constraint = pd.Series(np.zeros(len(indx_dr_days)), index=indx_dr_days, name='DR Charge Max (kW)')
self.discharge_min_constraint = pd.Series(qc, index=indx_dr_days, name='DR Discharge Min (kW)')
self.system_requirements += [Requirement('discharge', 'min', self.name, self.discharge_min_constraint),
Requirement('charge', 'max', self.name, self.charge_max_constraint)]
else:
self.qualifying_energy()
def day_ahead_event_scheduling(self):
""" If Dr events are scheduled with the STORAGE operator the day before the event, then the operator knows
exactly when these events will occur.
We need to make sure that the storage can perform, IF called upon, by making the battery can discharge atleast enough
to meet the qualifying capacity and reserving enough energy to meet the full duration of the event
START_HOUR is required. A user must also provide either END_HOUR or LENGTH
Returns: index for when the qualifying capacity must apply
"""
index = self.system_load.index
he = index.hour + 1 # hour ending
##########################
# FIND DR EVENTS: system load -> group by date -> filter by DR hours -> sum system load energy -> filter by top n days
##########################
# dr program is active based on month and if hour is in program hours
active = self.months & (he >= self.start_hour) & (he <= self.end_hour)
# remove weekends from active datetimes if dr_weekends is False
if not self.weekend:
active = active & (active.index.weekday < SATURDAY).astype('int64')
# 1) system load, during ACTIVE time-steps from largest to smallest
load_during_active_events = self.system_load.loc[active]
# 2) system load is groupby by date and summed and multiplied by DT
sum_system_load_energy = load_during_active_events.groupby(by=load_during_active_events.index.date).sum() * self.dt
# 3) sort the energy per event and select peak time-steps
# find number of events in month where system_load is at peak during active hours: select only first DAYS number of timestamps, per month
disp_days = sum_system_load_energy.sort_values(ascending=False)[:self.days]
# create a mask that is true when ACTIVE is true and the date is in DISP_DAYS.INDEX
active_event_mask = pd.Series(np.repeat(False, len(index)), index=index)
for date in disp_days.index:
active_event_mask = (index.date == date) & active | active_event_mask
# create index for power constraint
indx_dr_days = active_event_mask.loc[active_event_mask].index
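        # indx_dr_days holds every time step that falls inside program hours on one of
        # the top-`days` peak-energy dates selected above; these are the day-ahead
        # event windows where the charge/discharge constraints are applied.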
return indx_dr_days
def day_of_event_scheduling(self):
""" If the DR events are scheduled the day of, then the STORAGE operator must prepare for an event to occur
        every day, to start at any time between the program START_HOUR and ending on END_HOUR for the duration of
LENGTH.
        In this case there must be a power reservation because the storage might or might not be called, so there must
be enough power capacity to provide for all services.
Returns: index for when the qualifying capacity must apply
"""
##########################
# FIND DR EVENTS
##########################
index = self.system_load.index
he = index.hour + 1
# dr program is active based on month and if hour is in program hours
active = self.months & (he >= self.start_hour) & (he <= self.end_hour)
# remove weekends from active datetimes if dr_weekends is False
if not self.weekend:
active = active & (active.index.weekday < SATURDAY).astype('int64')
return active.loc[active].index
@staticmethod
def qualifying_commitment(der_lst, length):
"""
Args:
der_lst (list): list of the initialized DERs in our scenario
length (int): length of the event
NOTE: RA has this same method too -HN
"""
qc = sum(der_instance.qualifying_capacity(length) for der_instance in der_lst)
return qc
def qualifying_energy(self):
""" Calculated the qualifying energy to be able to participate in an event.
This function should be called after calculating the qualifying commitment.
"""
qe = self.qc * self.length # qualifying energy timeseries dataframe
# only apply energy constraint whenever an event could start. So we need to get rid of the last (SELF.LENGTH*SELF.DT) - 1 timesteps per event
last_start = self.end_hour - self.length # this is the last time that an event can start
mask = qe.index.hour <= last_start
self.qe = qe.loc[mask]
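        # Example (hypothetical): with end_hour=19 and length=4, last_start is 15, so
        # the energy requirement qc * length only applies to time steps whose hour is
        # <= 15, the latest hour at which a full-length event could still begin.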
def p_reservation_discharge_up(self, mask):
""" the amount of discharge power in the up direction (supplying power up into the grid) that
needs to be reserved for this value stream
Args:
mask (DataFrame): A boolean array that is true for indices corresponding to time_series data included
in the subs data set
Returns: CVXPY parameter/variable
"""
if not self.day_ahead:
# make sure we will be able to discharge if called upon (in addition to other market services)
subs = mask.loc[mask]
dis_reservation = pd.Series(np.zeros(sum(mask)), index=subs.index)
subs_qc = self.qc.loc[self.qc.index.isin(subs.index)]
if not subs_qc.empty:
dis_reservation.update(subs_qc)
up = cvx.Parameter(shape=sum(mask), value=dis_reservation.values, name='DischargeResDR')
else:
up = super().p_reservation_discharge_up(mask)
return up
def proforma_report(self, opt_years, apply_inflation_rate_func, fill_forward_func, results):
""" Calculates the proforma that corresponds to participation in this value stream
Args:
opt_years (list): list of years the optimization problem ran for
apply_inflation_rate_func:
fill_forward_func:
results (pd.DataFrame): DataFrame with all the optimization variable solutions
Returns: A DataFrame with each year in opt_years as the index and the corresponding
value this stream provided
"""
proforma = ValueStream.proforma_report(self, opt_years, apply_inflation_rate_func,
fill_forward_func, results)
proforma[self.name + ' Capacity Payment'] = 0
proforma[self.name + ' Energy Payment'] = 0
energy_displaced = results.loc[self.possible_event_times, 'Total Storage Power (kW)']
energy_displaced += results.loc[self.possible_event_times, 'Total Generation (kW)']
for year in opt_years:
year_cap_price = self.cap_price.loc[self.cap_price.index.year == year]
year_monthly_cap = self.cap_monthly.loc[self.cap_monthly.index.year == year]
proforma.loc[ | pd.Period(year=year, freq='y') | pandas.Period |
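# A minimal standalone sketch of the event-selection pattern used above (mask
# hourly load to program hours, sum energy per day, keep the top-n days),
# written with plain pandas. All names below (system_load, start_hour,
# end_hour, days, dt) are illustrative assumptions, not taken from the class.
import numpy as np
import pandas as pd

index = pd.date_range("2021-06-01", "2021-06-30 23:00", freq="h")
system_load = pd.Series(np.random.default_rng(0).uniform(50, 100, len(index)), index=index)
start_hour, end_hour, days, dt = 14, 18, 5, 1.0

he = index.hour + 1                                     # hour-ending convention
active = pd.Series((he >= start_hour) & (he <= end_hour), index=index)
active &= index.weekday < 5                             # drop weekends

masked = system_load[active]
energy_per_day = masked.groupby(masked.index.date).sum() * dt
disp_days = energy_per_day.nlargest(days)               # top-n event days

event_mask = active & pd.Series(index.normalize().isin(pd.to_datetime(disp_days.index)), index=index)
indx_dr_days = event_mask[event_mask].index             # analogous to the index returned above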
import pandas as pd
import numpy as np
import os
def tsp_df(self):
df = self.df
df1 = pd.DataFrame()
for pid, g in df.groupby(['auto_participant_id']):
g['index_raw'] = g.index
n = 0
for i, r in g.iterrows():
n = n + 1
g.at[i, 'index_raw'] = n
df1 = | pd.concat([df1, g], sort=False) | pandas.concat |
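# A hedged aside on the snippet above: the per-group renumbering done with
# iterrows() can be expressed without the inner loop via groupby().cumcount().
# The DataFrame below is made up for illustration; only the column name
# auto_participant_id is taken from the snippet.
import pandas as pd

demo = pd.DataFrame({
    "auto_participant_id": [1, 1, 2, 2, 2],
    "value": [10, 20, 30, 40, 50],
})
demo["index_raw"] = demo.groupby("auto_participant_id").cumcount() + 1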
# Program to display the MXN:USD exchange rate
# for a range of dates.
# Program imports
######################
import os
import requests
import pandas as pd
# Dates for the calculation
########################
print("\n Busqueda de FX para Solventar Obligaciones: \n")
fecha_inicial = input("Fecha Inicial de Busqueda yyyy-mm-dd: ")
fecha_final = input("Fecha Final de Busqueda yyyy-mm-dd: ")
# Connection to Banxico
####################
token = os.environ.get("token_banxico")
# Banxico query token
obligaciones = "<PASSWORD>" # FX to settle obligations ("FX Para Solventar Obligaciones")
# Banxico series ID for the download
# Data download function
##############################
def descarga_bmx_serie(serie, fechainicio, fechafin, token):
# The query parameters are appended to the Banxico API URL
url = ("https://www.banxico.org.mx/SieAPIRest/service/v1/series/"
+ serie
+ "/datos/"
+ fechainicio
+ "/"
+ fechafin
)
# Headers (with the token) must be passed with the request
headers = {"Bmx-Token": token}
# Send the request using the GET method
response = requests.get(url, headers=headers)
# Get the response status code from the server.
status = response.status_code
if status == 200:
# If the status is OK, build the dataframe
raw_data = response.json()
# Store the response in a variable.
data = raw_data["bmx"]["series"][0]["datos"]
# Filter the JSON
# Access the dictionary holding the data
global df
# Make the df variable global so it can be accessed later
df = | pd.DataFrame(data) | pandas.DataFrame |
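# A hedged follow-up sketch: the Banxico SIE API returns 'fecha'/'dato' as
# strings, so a typical next step is to parse the dates (assuming the API's
# dd/mm/yyyy format) and cast the rate to float. The sample records below are
# made up; error handling is omitted.
import pandas as pd

sample = [{"fecha": "02/01/2024", "dato": "16.9523"},
          {"fecha": "03/01/2024", "dato": "17.0101"}]
fx = pd.DataFrame(sample)
fx["fecha"] = pd.to_datetime(fx["fecha"], format="%d/%m/%Y")
fx["dato"] = pd.to_numeric(fx["dato"], errors="coerce")
fx = fx.set_index("fecha").rename(columns={"dato": "fx_obligaciones"})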
import functools
import numpy as np
import scipy
import scipy.linalg
import scipy.sparse as sps
import scipy.sparse.linalg as spsl
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
import logging
import tables as tb
import os
import sandy
import pytest
pd.options.display.float_format = '{:.5e}'.format
__author__ = "<NAME>"
__all__ = [
"CategoryCov",
"EnergyCov",
"triu_matrix",
"corr2cov",
"random_corr",
"random_cov",
"sample_distribution",
]
S = np.array([[1, 1, 1],
[1, 2, 1],
[1, 3, 1]])
var = np.array([[0, 0, 0],
[0, 2, 0],
[0, 0, 3]])
minimal_covtest = pd.DataFrame(
[[9437, 2, 1e-2, 9437, 2, 1e-2, 0.02],
[9437, 2, 2e5, 9437, 2, 2e5, 0.09],
[9437, 2, 1e-2, 9437, 102, 1e-2, 0.04],
[9437, 2, 2e5, 9437, 102, 2e5, 0.05],
[9437, 102, 1e-2, 9437, 102, 1e-2, 0.01],
[9437, 102, 2e5, 9437, 102, 2e5, 0.01]],
columns=["MAT", "MT", "E", "MAT1", "MT1", 'E1', "VAL"]
)
def cov33csv(func):
def inner(*args, **kwargs):
key = "<KEY>"
kw = kwargs.copy()
if key in kw:
if kw[key]:
print(f"found argument '{key}', ignore oher arguments")
out = func(
*args,
index_col=[0, 1, 2],
header=[0, 1, 2],
)
out.index.names = ["MAT", "MT", "E"]
out.columns.names = ["MAT", "MT", "E"]
return out
else:
del kw[key]
out = func(*args, **kw)
return out
return inner
class _Cov(np.ndarray):
"""Covariance matrix treated as a `numpy.ndarray`.
Methods
-------
corr
extract correlation matrix
corr2cov
produce covariance matrix given correlation matrix and standard
deviation array
eig
get covariance matrix eigenvalues and eigenvectors
get_L
decompose and extract lower triangular matrix
sampling
draw random samples
"""
def __new__(cls, arr):
obj = np.ndarray.__new__(cls, arr.shape, float)
obj[:] = arr[:]
if not obj.ndim == 2:
raise sandy.Error("covariance matrix must have two dimensions")
if not np.allclose(obj, obj.T):
raise sandy.Error("covariance matrix must be symmetric")
if (np.diag(arr) < 0).any():
raise sandy.Error("covariance matrix must have positive variances")
return obj
@staticmethod
def _up2down(self):
U = np.triu(self)
L = np.triu(self, 1).T
C = U + L
return C
def eig(self):
"""
Extract eigenvalues and eigenvectors.
Returns
-------
`Pandas.Series`
real part of eigenvalues sorted in descending order
`np.array`
matrix of eigenvectors
"""
E, V = scipy.linalg.eig(self)
E, V = E.real, V.real
return E, V
def corr(self):
"""Extract correlation matrix.
.. note:: zeros on the covariance matrix diagonal are translated
into zeros also on the the correlation matrix diagonal.
Returns
-------
`sandy.formats.utils.Cov`
correlation matrix
"""
std = np.sqrt(np.diag(self))
with np.errstate(divide='ignore', invalid='ignore'):
coeff = np.true_divide(1, std)
coeff[~ np.isfinite(coeff)] = 0 # -inf inf NaN
corr = np.multiply(np.multiply(self.T, coeff).T, coeff)
return self.__class__(corr)
def _reduce_size(self):
"""
Reduces the size of the matrix, erasing the null values.
Returns
-------
nonzero_idxs : numpy.ndarray
The indices of the diagonal that are not null.
cov_reduced : sandy.core.cov._Cov
The reduced matrix.
"""
nonzero_idxs = np.flatnonzero(np.diag(self))
cov_reduced = self[nonzero_idxs][:, nonzero_idxs]
return nonzero_idxs, cov_reduced
@classmethod
def _restore_size(cls, nonzero_idxs, cov_reduced, dim):
"""
Restore the size of the matrix
Parameters
----------
nonzero_idxs : numpy.ndarray
The indices of the diagonal that are not null.
cov_reduced : sandy.core.cov._Cov
The reduced matrix.
dim : int
Dimension of the original matrix.
Returns
-------
cov : sandy.core.cov._Cov
Matrix of specified dimensions.
"""
cov = _Cov(np.zeros((dim, dim)))
for i, ni in enumerate(nonzero_idxs):
cov[ni, nonzero_idxs] = cov_reduced[i]
return cov
def sampling(self, nsmp, seed=None):
"""
Extract random samples from the covariance matrix, either using
the cholesky or the eigenvalue decomposition.
Parameters
----------
nsmp : `int`
number of samples
seed : `int`
seed for the random number generator (default is `None`)
Returns
-------
`np.array`
2D array of random samples with dimension `(self.shape[0], nsmp)`
"""
dim = self.shape[0]
np.random.seed(seed=seed)
y = np.random.randn(dim, nsmp)
nonzero_idxs, cov_reduced = self._reduce_size()
L_reduced = cov_reduced.get_L()
L = self.__class__._restore_size(nonzero_idxs, L_reduced, dim)
samples = np.array(L.dot(y))
return samples
def get_L(self):
"""
Extract lower triangular matrix `L` for which `L*L^T == self`.
Returns
-------
`np.array`
lower triangular matrix
"""
try:
L = scipy.linalg.cholesky(
self,
lower=True,
overwrite_a=False,
check_finite=False
)
except np.linalg.linalg.LinAlgError:
E, V = self.eig()
E[E <= 0] = 0
Esqrt = np.diag(np.sqrt(E))
M = V.dot(Esqrt)
Q, R = scipy.linalg.qr(M.T)
L = R.T
return L
class CategoryCov():
"""
Properties
----------
data
covariance matrix as a dataframe
size
first dimension of the covariance matrix
Methods
-------
corr2cov
create a covariance matrix given a correlation matrix and a standard
deviation vector
from_stack
create a covariance matrix from a stacked `pd.DataFrame`
from_stdev
construct a covariance matrix from a stdev vector
from_var
construct a covariance matrix from a variance vector
get_corr
extract correlation matrix from covariance matrix
get_eig
extract eigenvalues and eigenvectors from covariance matrix
get_L
extract lower triangular matrix such that $C=L L^T$
get_std
extract standard deviations from covariance matrix
invert
calculate the inverse of the matrix
sampling
extract perturbation coefficients according to chosen distribution
and covariance matrix
"""
def __repr__(self):
return self.data.__repr__()
def __init__(self, *args, **kwargs):
self.data = pd.DataFrame(*args, **kwargs)
@property
def data(self):
"""
Covariance matrix as a dataframe.
Attributes
----------
index : `pandas.Index` or `pandas.MultiIndex`
indices
columns : `pandas.Index` or `pandas.MultiIndex`
columns
values : `numpy.array`
covariance values as `float`
Returns
-------
`pandas.DataFrame`
covariance matrix
Notes
-----
..note :: In the future, additional tests will be implemented to check
that the covariance matrix is symmetric and has positive variances.
Examples
--------
>>> with pytest.raises(TypeError): sandy.CategoryCov(np.array[1])
>>> with pytest.raises(TypeError): sandy.CategoryCov(np.array([[1, 2], [2, -4]]))
>>> with pytest.raises(TypeError): sandy.CategoryCov(np.array([[1, 2], [3, 4]]))
"""
return self._data
@data.setter
def data(self, data):
self._data = pd.DataFrame(data, dtype=float)
if not (len(self._data.shape) == 2 and self._data.shape[0] == self._data.shape[1]):
raise TypeError("Covariance matrix must have two dimensions")
if not (np.diag(self._data) >= 0).all():
raise TypeError("Covariance matrix must have positive variance")
sym_limit = 10
# Round to avoid numerical fluctuations
if not (self._data.values.round(sym_limit) == self._data.values.T.round(sym_limit)).all():
raise TypeError("Covariance matrix must be symmetric")
@property
def size(self):
return self.data.values.shape[0]
def get_std(self):
"""
Extract standard deviations.
Returns
-------
`pandas.Series`
1d array of standard deviations
Examples
--------
>>> sandy.CategoryCov([[1, 0.4],[0.4, 1]]).get_std()
0 1.00000e+00
1 1.00000e+00
Name: STD, dtype: float64
"""
cov = self.to_sparse().diagonal()
std = np.sqrt(cov)
return pd.Series(std, index=self.data.index, name="STD")
def get_eig(self, tolerance=None):
"""
Extract eigenvalues and eigenvectors.
Parameters
----------
tolerance : `float`, optional, default is `None`
replace all eigenvalues smaller than a given tolerance with zeros.
The replacement condition is implemented as:
.. math::
$$
\frac{e_i}{e_{MAX}} < tolerance
$$
Then, a `tolerance=1e-3` will replace all eigenvalues
1000 times smaller than the largest eigenvalue.
A `tolerance=0` will replace all negative eigenvalues.
Returns
-------
`Pandas.Series`
array of eigenvalues
`pandas.DataFrame`
matrix of eigenvectors
Notes
-----
.. note:: only the real part of the eigenvalues is preserved
.. note:: the discussion associated with the implementation
of this algorithm is available [here](https://github.com/luca-fiorito-11/sandy/discussions/135)
Examples
--------
Extract eigenvalues of correlation matrix.
>>> sandy.CategoryCov([[1, 0.4], [0.4, 1]]).get_eig()[0]
0 1.40000e+00
1 6.00000e-01
Name: EIG, dtype: float64
Extract eigenvectors of correlation matrix.
>>> sandy.CategoryCov([[1, 0.4], [0.4, 1]]).get_eig()[1]
0 1
0 7.07107e-01 -7.07107e-01
1 7.07107e-01 7.07107e-01
Extract eigenvalues of covariance matrix.
>>> sandy.CategoryCov([[0.1, 0.1], [0.1, 1]]).get_eig()[0]
0 8.90228e-02
1 1.01098e+00
Name: EIG, dtype: float64
Set up a tolerance.
>>> sandy.CategoryCov([[0.1, 0.1], [0.1, 1]]).get_eig(tolerance=0.1)[0]
0 0.00000e+00
1 1.01098e+00
Name: EIG, dtype: float64
Test with negative eigenvalues.
>>> sandy.CategoryCov([[1, 2], [2, 1]]).get_eig()[0]
0 3.00000e+00
1 -1.00000e+00
Name: EIG, dtype: float64
Replace negative eigenvalues.
>>> sandy.CategoryCov([[1, 2], [2, 1]]).get_eig(tolerance=0)[0]
0 3.00000e+00
1 0.00000e+00
Name: EIG, dtype: float64
Check output size.
>>> cov = sandy.CategoryCov.random_cov(50, seed=11)
>>> assert cov.get_eig()[0].size == cov.data.shape[0] == 50
>>> sandy.CategoryCov([[1, 0.2, 0.1], [0.2, 2, 0], [0.1, 0, 3]]).get_eig()[0]
0 9.56764e-01
1 2.03815e+00
2 3.00509e+00
Name: EIG, dtype: float64
Real test on H1 file
>>> endf6 = sandy.get_endf6_file("jeff_33", "xs", 10010)
>>> ek = sandy.energy_grids.CASMO12
>>> err = endf6.get_errorr(ek_errorr=ek, err=1)
>>> cov = err.get_cov()
>>> cov.get_eig()[0].sort_values(ascending=False).head(7)
0 3.66411e-01
1 7.05311e-03
2 1.55346e-03
3 1.60175e-04
4 1.81374e-05
5 1.81078e-06
6 1.26691e-07
Name: EIG, dtype: float64
>>> assert not (cov.get_eig()[0] >= 0).all()
>>> assert (cov.get_eig(tolerance=0)[0] >= 0).all()
"""
E, V = scipy.linalg.eig(self.data)
E = pd.Series(E.real, name="EIG")
V = pd.DataFrame(V.real)
if tolerance is not None:
E[E/E.max() < tolerance] = 0
return E, V
def get_corr(self):
"""
Extract correlation matrix.
Returns
-------
df : :obj: `CategoryCov`
correlation matrix
Examples
--------
>>> sandy.CategoryCov([[4, 2.4],[2.4, 9]]).get_corr()
0 1
0 1.00000e+00 4.00000e-01
1 4.00000e-01 1.00000e+00
"""
cov = self.data.values
with np.errstate(divide='ignore', invalid='ignore'):
coeff = np.true_divide(1, self.get_std().values)
coeff[~ np.isfinite(coeff)] = 0 # -inf inf NaN
corr = np.multiply(np.multiply(cov, coeff).T, coeff)
df = pd.DataFrame(
corr,
index=self.data.index,
columns=self.data.columns,
)
return self.__class__(df)
def invert(self, rows=None):
"""
Method for calculating the inverse matrix.
Parameters
----------
rows : `int`, optional
Option to use row calculation for matrix calculations. This option
defines the number of lines to be taken into account in each loop.
The default is None.
Returns
-------
`CategoryCov`
The inverse matrix.
Examples
--------
>>> S = sandy.CategoryCov(np.diag(np.array([1, 2, 3])))
>>> S.invert()
0 1 2
0 1.00000e+00 0.00000e+00 0.00000e+00
1 0.00000e+00 5.00000e-01 0.00000e+00
2 0.00000e+00 0.00000e+00 3.33333e-01
>>> S = sandy.CategoryCov(np.diag(np.array([0, 2, 3])))
>>> S.invert()
0 1 2
0 0.00000e+00 0.00000e+00 0.00000e+00
1 0.00000e+00 5.00000e-01 0.00000e+00
2 0.00000e+00 0.00000e+00 3.33333e-01
>>> S = sandy.CategoryCov(np.diag(np.array([0, 2, 3])))
>>> S.invert(rows=1)
0 1 2
0 0.00000e+00 0.00000e+00 0.00000e+00
1 0.00000e+00 5.00000e-01 0.00000e+00
2 0.00000e+00 0.00000e+00 3.33333e-01
"""
index = self.data.index
columns = self.data.columns
M_nonzero_idxs, M_reduce = reduce_size(self.data)
cov = sps.csc_matrix(M_reduce.values)
rows_ = cov.shape[0] if rows is None else rows
data = sparse_tables_inv(cov, rows=rows_)
M_inv = restore_size(M_nonzero_idxs, data, len(self.data))
M_inv = M_inv.reindex(index=index, columns=columns).fillna(0)
return self.__class__(M_inv)
def log2norm_cov(self, mu):
"""
Transform covariance matrix to the one of the underlying normal
distribution.
Parameters
----------
mu : iterable
The desired mean values of the target lognormal distribution.
Returns
-------
`CategoryCov` of the underlying normal covariance matrix
Examples
--------
>>> cov = CategoryCov(pd.DataFrame([[8, 2, 3], [2, 10, 4], [3, 4, 7]], index=['A', 'B', 'C'], columns=['A', 'B', 'C']))
>>> cov.log2norm_cov(pd.Series(np.ones(cov.data.shape[0]), index=cov.data.index))
A B C
A 2.19722e+00 1.09861e+00 1.38629e+00
B 1.09861e+00 2.39790e+00 1.60944e+00
C 1.38629e+00 1.60944e+00 2.07944e+00
>>> cov = CategoryCov(pd.DataFrame([[8, 2, 3], [2, 10, 4], [3, 4, 7]], index=['A', 'B', 'C'], columns=['A', 'B', 'C']))
>>> mu = pd.Series([1, 2, .5], index=["A", "B", "C"])
>>> cov.log2norm_cov(mu)
A B C
A 2.19722e+00 6.93147e-01 1.94591e+00
B 6.93147e-01 1.25276e+00 1.60944e+00
C 1.94591e+00 1.60944e+00 3.36730e+00
>>> cov = CategoryCov(pd.DataFrame([[8, 2, 3], [2, 10, 4], [3, 4, 7]], index=['A', 'B', 'C'], columns=['A', 'B', 'C']))
>>> mu = [1, 2, .5]
>>> cov.log2norm_cov(mu)
A B C
A 2.19722e+00 6.93147e-01 1.94591e+00
B 6.93147e-01 1.25276e+00 1.60944e+00
C 1.94591e+00 1.60944e+00 3.36730e+00
>>> cov = CategoryCov(pd.DataFrame([[8, 2, 3], [2, 10, 4], [3, 4, 7]], index=['A', 'B', 'C'], columns=['A', 'B', 'C']))
>>> mu = np.array([1, 2, .5])
>>> cov.log2norm_cov(mu)
A B C
A 2.19722e+00 6.93147e-01 1.94591e+00
B 6.93147e-01 1.25276e+00 1.60944e+00
C 1.94591e+00 1.60944e+00 3.36730e+00
Notes
-----
..notes:: Reference for the equation is 10.1016/j.nima.2012.06.036
.. math::
$$
cov(lnx_i, lnx_j) = \ln\left(\frac{cov(x_i,x_j)}{<x_i>\cdot<x_j>}+1\right)
$$
"""
mu_ = np.diag(1 / pd.Series(mu))
mu_ = pd.DataFrame(mu_, index=self.data.index, columns=self.data.index)
return self.__class__(np.log(self.sandwich(mu_).data + 1))
def log2norm_mean(self, mu):
"""
Transform mean values to the mean values of the underlying normal
distribution.
Parameters
----------
mu : iterable
The target mean values.
Returns
-------
`pd.Series` of the underlyig normal distribution mean values
Examples
--------
>>> cov = CategoryCov(pd.DataFrame([[8, 2, 3], [2, 10, 4], [3, 4, 7]], index=['A', 'B', 'C'], columns=['A', 'B', 'C']))
>>> mu = pd.Series(np.ones(cov.data.shape[0]), index=cov.data.index)
>>> cov.log2norm_mean(mu)
A -1.09861e+00
B -1.19895e+00
C -1.03972e+00
dtype: float64
>>> cov = CategoryCov(pd.DataFrame([[8, 2, 3], [2, 10, 4], [3, 4, 7]], index=['A', 'B', 'C'], columns=['A', 'B', 'C']))
>>> cov.log2norm_mean([1, 1, 1])
A -1.09861e+00
B -1.19895e+00
C -1.03972e+00
dtype: float64
>>> cov = CategoryCov(pd.DataFrame([[8, 2, 3], [2, 10, 4], [3, 4, 7]], index=['A', 'B', 'C'], columns=['A', 'B', 'C']))
>>> mu = np.ones(cov.data.shape[0])
>>> cov.log2norm_mean(mu)
A -1.09861e+00
B -1.19895e+00
C -1.03972e+00
dtype: float64
Reindexing example
"""
mu_ = pd.Series(mu)
mu_.index = self.data.index
return np.log(mu_**2 / np.sqrt(np.diag(self.data) + mu_**2))
def sampling(self, nsmp, seed=None, rows=None, pdf='normal',
tolerance=None, relative=True):
"""
Extract perturbation coefficients according to chosen distribution with
covariance from given covariance matrix. See note for non-normal
distribution sampling.
The samples' mean will be 1 or 0 depending on `relative` kwarg.
Parameters
----------
nsmp : `int`
number of samples.
seed : `int`, optional, default is `None`
seed for the random number generator (by default use `numpy`
default pseudo-random number generator).
rows : `int`, optional, default is `None`
option to use row calculation for matrix calculations. This option
defines the number of lines to be taken into account in each loop.
pdf : `str`, optional, default is 'normal'
random numbers distribution.
Available distributions are:
* `'normal'`
* `'uniform'`
* `'lognormal'`
tolerance : `float`, optional, default is `None`
replace all eigenvalues smaller than a given tolerance with zeros.
relative : `bool`, optional, default is `True`
flag to switch between relative and absolute covariance matrix
handling
* `True`: samples' mean will be 1
* `False`: samples' mean will be 0
Returns
-------
`sandy.Samples`
object containing samples
Notes
-----
.. note:: sampling with uniform distribution is performed on
diagonal covariance matrix, neglecting all correlations.
.. note:: sampling with lognormal distribution gives a set of samples
with mean=1 as lognormal distribution can not have mean=0.
Therefore, `relative` parameter does not apply to it.
Examples
--------
Draw 3 sets of samples using custom seed:
>>> sandy.CategoryCov([[1, 0.4],[0.4, 1]]).sampling(3, seed=11)
0 1
0 -7.49455e-01 -2.13159e+00
1 1.28607e+00 1.10684e+00
2 1.48457e+00 9.00879e-01
>>> sandy.CategoryCov([[1, 0.4],[0.4, 1]]).sampling(3, seed=11, rows=1)
0 1
0 -7.49455e-01 -2.13159e+00
1 1.28607e+00 1.10684e+00
2 1.48457e+00 9.00879e-01
>>> sample = sandy.CategoryCov([[1, 0.4],[0.4, 1]]).sampling(1000000, seed=11)
>>> sample.data.cov()
0 1
0 9.98662e-01 3.99417e-01
1 3.99417e-01 9.98156e-01
Small negative eigenvalue:
>>> sandy.CategoryCov([[1, -.2],[-.2, 3]]).sampling(3, seed=11, tolerance=0)
0 1
0 2.74945e+00 5.21505e+00
1 7.13927e-01 1.07147e+00
2 5.15435e-01 1.64683e+00
>>> sandy.CategoryCov([[1, -.2],[-.2, 3]]).sampling(1000000, seed=11, tolerance=0).data.cov()
0 1
0 9.98662e-01 -1.99822e-01
1 -1.99822e-01 2.99437e+00
Sampling with different `pdf`:
>>> sandy.CategoryCov([[1, -.2],[-.2, 3]]).sampling(3, seed=11, pdf='uniform', tolerance=0)
0 1
0 -1.07578e-01 2.34960e+00
1 -6.64587e-01 5.21222e-01
2 8.72585e-01 9.12563e-01
>>> sandy.CategoryCov([[1, .2],[.2, 3]]).sampling(3, seed=11, pdf='lognormal', tolerance=0)
0 1
0 3.03419e+00 1.57919e+01
1 5.57248e-01 4.74160e-01
2 4.72366e-01 6.50840e-01
>>> sandy.CategoryCov([[1, -.2],[-.2, 3]]).sampling(1000000, seed=11, pdf='uniform', tolerance=0).data.cov()
0 1
0 1.00042e+00 -1.58806e-03
1 -1.58806e-03 3.00327e+00
>>> sandy.CategoryCov([[1, .2],[.2, 3]]).sampling(1000000, seed=11, pdf='lognormal', tolerance=0).data.cov()
0 1
0 1.00219e+00 1.99199e-01
1 1.99199e-01 3.02605e+00
`relative` kwarg usage:
>>> sandy.CategoryCov([[1, -.2],[-.2, 3]]).sampling(1000000, seed=11, pdf='normal', tolerance=0, relative=True).data.mean(axis=0)
0 1.00014e+00
1 9.99350e-01
dtype: float64
>>> sandy.CategoryCov([[1, -.2],[-.2, 3]]).sampling(1000000, seed=11, pdf='normal', tolerance=0, relative=False).data.mean(axis=0)
0 1.41735e-04
1 -6.49679e-04
dtype: float64
>>> sandy.CategoryCov([[1, -.2],[-.2, 3]]).sampling(1000000, seed=11, pdf='uniform', tolerance=0, relative=True).data.mean(axis=0)
0 9.98106e-01
1 9.99284e-01
dtype: float64
>>> sandy.CategoryCov([[1, -.2],[-.2, 3]]).sampling(1000000, seed=11, pdf='uniform', tolerance=0, relative=False).data.mean(axis=0)
0 -1.89367e-03
1 -7.15929e-04
dtype: float64
Lognormal distribution sampling is independent of the `relative` kwarg
>>> sandy.CategoryCov([[1, .2],[.2, 3]]).sampling(1000000, seed=11, pdf='lognormal', tolerance=0, relative=True).data.mean(axis=0)
0 9.99902e-01
1 9.99284e-01
dtype: float64
>>> sandy.CategoryCov([[1, .2],[.2, 3]]).sampling(1000000, seed=11, pdf='lognormal', tolerance=0, relative=False).data.mean(axis=0)
0 9.99902e-01
1 9.99284e-01
dtype: float64
"""
dim = self.data.shape[0]
pdf_ = pdf if pdf != 'lognormal' else 'normal'
y = sample_distribution(dim, nsmp, seed=seed, pdf=pdf_) - 1
y = sps.csc_matrix(y)
# the covariance matrix to decompose is created depending on the chosen
# pdf
if pdf == 'uniform':
to_decompose = self.__class__(np.diag(np.diag(self.data)))
elif pdf == 'lognormal':
ones = np.ones(self.data.shape[0])
to_decompose = self.log2norm_cov(ones)
else:
to_decompose = self
L = sps.csr_matrix(to_decompose.get_L(rows=rows,
tolerance=tolerance))
samples = pd.DataFrame(L.dot(y).toarray(), index=self.data.index,
columns=list(range(nsmp)))
if pdf == 'lognormal':
# mean value of lognormally sampled distributions will be one by
# default
samples = np.exp(samples.add(self.log2norm_mean(ones), axis=0))
elif relative:
samples += 1
return sandy.Samples(samples.T)
@classmethod
def from_var(cls, var):
"""
Construct the covariance matrix from the variance vector.
Parameters
----------
var : 1D iterable
Variance vector.
Returns
-------
`CategoryCov`
Object containing the covariance matrix.
Example
-------
>>> S = pd.Series(np.array([0, 2, 3]), index=pd.Index([1, 2, 3]))
>>> cov = sandy.CategoryCov.from_var(S)
>>> cov
1 2 3
1 0.00000e+00 0.00000e+00 0.00000e+00
2 0.00000e+00 2.00000e+00 0.00000e+00
3 0.00000e+00 0.00000e+00 3.00000e+00
>>> assert type(cov) is sandy.CategoryCov
>>> S = sandy.CategoryCov.from_var((1, 2, 3))
>>> S
0 1 2
0 1.00000e+00 0.00000e+00 0.00000e+00
1 0.00000e+00 2.00000e+00 0.00000e+00
2 0.00000e+00 0.00000e+00 3.00000e+00
>>> assert type(S) is sandy.CategoryCov
>>> assert type(sandy.CategoryCov.from_var([1, 2, 3])) is sandy.CategoryCov
"""
var_ = pd.Series(var)
cov_values = sps.diags(var_.values).toarray()
cov = pd.DataFrame(cov_values,
index=var_.index, columns=var_.index)
return cls(cov)
@classmethod
def from_stdev(cls, std):
"""
Construct the covariance matrix from the standard deviation vector.
Parameters
----------
std : `pandas.Series`
Standard deviations vector.
Returns
-------
`CategoryCov`
Object containing the covariance matrix.
Example
-------
>>> S = pd.Series(np.array([0, 2, 3]), index=pd.Index([1, 2, 3]))
>>> cov = sandy.CategoryCov.from_stdev(S)
>>> cov
1 2 3
1 0.00000e+00 0.00000e+00 0.00000e+00
2 0.00000e+00 4.00000e+00 0.00000e+00
3 0.00000e+00 0.00000e+00 9.00000e+00
>>> assert type(cov) is sandy.CategoryCov
>>> S = sandy.CategoryCov.from_stdev((1, 2, 3))
>>> S
0 1 2
0 1.00000e+00 0.00000e+00 0.00000e+00
1 0.00000e+00 4.00000e+00 0.00000e+00
2 0.00000e+00 0.00000e+00 9.00000e+00
>>> assert type(S) is sandy.CategoryCov
>>> assert type(sandy.CategoryCov.from_stdev([1, 2, 3])) is sandy.CategoryCov
"""
std_ = pd.Series(std)
var = std_ * std_
return cls.from_var(var)
@classmethod
def from_stack(cls, data_stack, index, columns, values, rows=10000000,
kind='upper'):
"""
Create a covariance matrix from a stacked dataframe.
Parameters
----------
data_stack : `pd.Dataframe`
Stacked dataframe.
index : 1D iterable, optional
Index of the final covariance matrix.
columns : 1D iterable, optional
Columns of the final covariance matrix.
values : `str`, optional
Name of the column where the values are located.
rows : `int`, optional
Number of rows to take into account into each loop. The default
is 10000000.
kind : `str`, optional
Select if the stack data represents upper or lower triangular
matrix. The default is 'upper'.
Returns
-------
`sandy.CategoryCov`
Covariance matrix.
Examples
--------
If the stack data represents the covariance matrix:
>>> S = pd.DataFrame(np.array([[1, 1, 1], [1, 2, 1], [1, 1, 1]]))
>>> S = S.stack().reset_index().rename(columns = {'level_0': 'dim1', 'level_1': 'dim2', 0: 'cov'})
>>> S = S[S['cov'] != 0]
>>> sandy.CategoryCov.from_stack(S, index=['dim1'], columns=['dim2'], values='cov', kind='all')
dim2 0 1 2
dim1
0 1.00000e+00 1.00000e+00 1.00000e+00
1 1.00000e+00 2.00000e+00 1.00000e+00
2 1.00000e+00 1.00000e+00 1.00000e+00
If the stack data represents only the upper triangular part of the
covariance matrix:
>>> test_1 = sandy.CategoryCov.from_stack(minimal_covtest, index=["MAT", "MT", "E"], columns=["MAT1", "MT1", "E1"], values='VAL').data
>>> test_1
MAT1 9437
MT1 2 102
E1 1.00000e-02 2.00000e+05 1.00000e-02 2.00000e+05
MAT MT E
9437 2 1.00000e-02 2.00000e-02 0.00000e+00 4.00000e-02 0.00000e+00
2.00000e+05 0.00000e+00 9.00000e-02 0.00000e+00 5.00000e-02
102 1.00000e-02 4.00000e-02 0.00000e+00 1.00000e-02 0.00000e+00
2.00000e+05 0.00000e+00 5.00000e-02 0.00000e+00 1.00000e-02
>>> test_2 = sandy.CategoryCov.from_stack(minimal_covtest, index=["MAT", "MT", "E"], columns=["MAT1", "MT1", "E1"], values='VAL', rows=1).data
>>> test_2
MAT1 9437
MT1 2 102
E1 1.00000e-02 2.00000e+05 1.00000e-02 2.00000e+05
MAT MT E
9437 2 1.00000e-02 2.00000e-02 0.00000e+00 4.00000e-02 0.00000e+00
2.00000e+05 0.00000e+00 9.00000e-02 0.00000e+00 5.00000e-02
102 1.00000e-02 4.00000e-02 0.00000e+00 1.00000e-02 0.00000e+00
2.00000e+05 0.00000e+00 5.00000e-02 0.00000e+00 1.00000e-02
>>> assert (test_1 == test_2).all().all()
If the stack data represents only the lower triangular part of the
covariance matrix:
>>> test_1 = sandy.CategoryCov.from_stack(minimal_covtest, index=["MAT1", "MT1", "E1"], columns=["MAT", "MT", "E"], values='VAL', kind="lower").data
>>> test_1
MAT 9437
MT 2 102
E 1.00000e-02 2.00000e+05 1.00000e-02 2.00000e+05
MAT1 MT1 E1
9437 2 1.00000e-02 2.00000e-02 0.00000e+00 4.00000e-02 0.00000e+00
2.00000e+05 0.00000e+00 9.00000e-02 0.00000e+00 5.00000e-02
102 1.00000e-02 4.00000e-02 0.00000e+00 1.00000e-02 0.00000e+00
2.00000e+05 0.00000e+00 5.00000e-02 0.00000e+00 1.00000e-02
>>> test_2 = sandy.CategoryCov.from_stack(minimal_covtest, index=["MAT1", "MT1", "E1"], columns=["MAT", "MT", "E"], values='VAL', kind="lower", rows=1).data
>>> test_2
MAT 9437
MT 2 102
E 1.00000e-02 2.00000e+05 1.00000e-02 2.00000e+05
MAT1 MT1 E1
9437 2 1.00000e-02 2.00000e-02 0.00000e+00 4.00000e-02 0.00000e+00
2.00000e+05 0.00000e+00 9.00000e-02 0.00000e+00 5.00000e-02
102 1.00000e-02 4.00000e-02 0.00000e+00 1.00000e-02 0.00000e+00
2.00000e+05 0.00000e+00 5.00000e-02 0.00000e+00 1.00000e-02
>>> assert (test_1 == test_2).all().all()
"""
cov = segmented_pivot_table(data_stack, rows=rows, index=index,
columns=columns, values=values)
if kind == 'all':
return cls(cov)
else:
return triu_matrix(cov, kind=kind)
def _gls_Vy_calc(self, S, rows=None):
"""
2D calculated output using
.. math::
$$
S\cdot V_{x_{prior}}\cdot S.T
$$
Parameters
----------
S : 2D iterable
Sensitivity matrix (MXN).
rows : `int`, optional
Option to use row calculation for matrix calculations. This option
defines the number of lines to be taken into account in each loop.
The default is None.
Returns
-------
`pd.DataFrame`
Covariance matrix `Vy_calc` calculated using
S.dot(Vx_prior).dot(S.T)
Example
-------
>>> S = np.array([[1, 2], [3, 4]])
>>> cov = sandy.CategoryCov.from_var([1, 1])
>>> cov._gls_Vy_calc(S)
0 1
0 5.00000e+00 1.10000e+01
1 1.10000e+01 2.50000e+01
>>> cov._gls_Vy_calc(S, rows=1)
0 1
0 5.00000e+00 1.10000e+01
1 1.10000e+01 2.50000e+01
"""
index = pd.DataFrame(S).index
S_ = pd.DataFrame(S).values
rows_ = S_.shape[0] if rows is None else rows
Vy_calc = sparse_tables_dot_multiple([S_, self.data.values,
S_.T], rows=rows_)
return pd.DataFrame(Vy_calc, index=index, columns=index)
def _gls_G(self, S, Vy_extra=None, rows=None):
"""
2D calculated output using
.. math::
$$
S\cdot V_{x_{prior}}\cdot S.T + V_{y_{extra}}
$$
Parameters
----------
S : 2D iterable
Sensitivity matrix (MXN).
Vy_extra : 2D iterable, optional.
2D covariance matrix for y_extra (MXM).
rows : `int`, optional
Option to use row calculation for matrix calculations. This option
defines the number of lines to be taken into account in each loop.
The default is None.
Returns
-------
`pd.DataFrame`
Covariance matrix `G` calculated using
S.dot(Vx_prior).dot(S.T) + Vy_extra
Example
-------
>>> S = np.array([[1, 2], [3, 4]])
>>> cov = sandy.CategoryCov.from_var([1, 1])
>>> Vy = np.diag(pd.Series([1, 1]))
>>> cov._gls_G(S, Vy)
0 1
0 6.00000e+00 1.10000e+01
1 1.10000e+01 2.60000e+01
>>> cov._gls_G(S, Vy, rows=1)
0 1
0 6.00000e+00 1.10000e+01
1 1.10000e+01 2.60000e+01
>>> cov._gls_G(S)
0 1
0 5.00000e+00 1.10000e+01
1 1.10000e+01 2.50000e+01
>>> cov._gls_G(S, rows=1)
0 1
0 5.00000e+00 1.10000e+01
1 1.10000e+01 2.50000e+01
"""
# GLS_sensitivity:
Vy_calc = self._gls_Vy_calc(S, rows=rows)
if Vy_extra is not None:
# Put the data in an appropriate format
Vy_extra_ = sandy.CategoryCov(Vy_extra).data
index = pd.DataFrame(Vy_extra).index
Vy_extra_ = Vy_extra_.values
Vy_calc = Vy_calc.reindex(index=index, columns=index).fillna(0).values
# Calculations:
Vy_calc = sps.csr_matrix(Vy_calc)
Vy_extra_ = sps.csr_matrix(Vy_extra_)
# G calculation
G = Vy_calc + Vy_extra_
G = pd.DataFrame(G.toarray(), index=index, columns=index)
else:
G = Vy_calc
return G
def _gls_G_inv(self, S, Vy_extra=None, rows=None):
"""
2D calculated output using
.. math::
$$
\left(S\cdot V_{x_{prior}}\cdot S.T + V_{y_{extra}}\right)^{-1}
$$
Parameters
----------
S : 2D iterable
Sensitivity matrix (MXN).
Vy_extra : 2D iterable, optional
2D covariance matrix for y_extra (MXM).
rows : `int`, optional
Option to use row calculation for matrix calculations. This option
defines the number of lines to be taken into account in each loop.
The default is None.
Returns
-------
`pd.DataFrame`
Covariance matrix `G_inv` calculated using
(S.dot(Vx_prior).dot(S.T) + Vy_extra)^-1
Example
-------
>>> S = np.array([[1, 2], [3, 4]])
>>> cov = sandy.CategoryCov.from_var([1, 1])
>>> Vy = np.diag(pd.Series([1, 1]))
>>> cov._gls_G_inv(S, Vy)
0 1
0 7.42857e-01 -3.14286e-01
1 -3.14286e-01 1.71429e-01
>>> cov._gls_G_inv(S, Vy, rows=1)
0 1
0 7.42857e-01 -3.14286e-01
1 -3.14286e-01 1.71429e-01
>>> cov._gls_G_inv(S)
0 1
0 6.25000e+00 -2.75000e+00
1 -2.75000e+00 1.25000e+00
>>> cov._gls_G_inv(S, rows=1)
0 1
0 6.25000e+00 -2.75000e+00
1 -2.75000e+00 1.25000e+00
"""
if Vy_extra is not None:
index = pd.DataFrame(Vy_extra).index
G = self._gls_G(S, Vy_extra=Vy_extra, rows=rows).values
else:
index = pd.DataFrame(S).index
G = self._gls_Vy_calc(S, rows=rows).values
G_inv = sandy.CategoryCov(G).invert(rows=rows).data.values
return pd.DataFrame(G_inv, index=index, columns=index)
def _gls_general_sensitivity(self, S, Vy_extra=None,
rows=None, threshold=None):
"""
Method to obtain general sensitivity according to GLS
.. math::
$$
V_{x_{prior}}\cdot S.T \cdot \left(S\cdot V_{x_{prior}}\cdot S.T + V_{y_{extra}}\right)^{-1}
$$
Parameters
----------
S : 2D iterable
Sensitivity matrix (MXN).
Vy_extra : 2D iterable
2D covariance matrix for y_extra (MXM).
rows : `int`, optional
Option to use row calculation for matrix calculations. This option
defines the number of lines to be taken into account in each loop.
The default is None.
threshold : `int`, optional
threshold to avoid numerical fluctuations. The default is None.
Returns
-------
`pd.DataFrame`
GLS sensitivity for a given Vy_extra and S.
Example
-------
>>> S = np.array([[1, 2], [3, 4]])
>>> cov = sandy.CategoryCov.from_var([1, 1])
>>> Vy = np.diag(pd.Series([1, 1]))
>>> cov._gls_general_sensitivity(S, Vy)
0 1
0 -2.00000e-01 2.00000e-01
1 2.28571e-01 5.71429e-02
>>> S = pd.DataFrame([[1, 2], [3, 4]], index=[1, 2],columns=[3, 4])
>>> cov = sandy.CategoryCov.from_var([1, 1])
>>> Vy = pd.DataFrame([[1, 0], [0, 1]], index=[1, 2], columns=[1, 2])
>>> cov._gls_general_sensitivity(S, Vy_extra=Vy)
1 2
3 -2.00000e-01 2.00000e-01
4 2.28571e-01 5.71429e-02
>>> cov._gls_general_sensitivity(S, Vy_extra=Vy, rows=1)
1 2
3 -2.00000e-01 2.00000e-01
4 2.28571e-01 5.71429e-02
>>> cov._gls_general_sensitivity(S)
1 2
3 -2.00000e+00 1.00000e+00
4 1.50000e+00 -5.00000e-01
>>> cov._gls_general_sensitivity(S, rows=1)
1 2
3 -2.00000e+00 1.00000e+00
4 1.50000e+00 -5.00000e-01
"""
index = pd.DataFrame(S).columns
columns = pd.DataFrame(S).index
S_ = pd.DataFrame(S).values
# GLS_sensitivity:
G_inv = self._gls_G_inv(S, Vy_extra=Vy_extra, rows=rows).values
rows_ = S_.shape[0] if rows is None else rows
sensitivity = sparse_tables_dot_multiple([self.data.values, S_.T,
G_inv], rows=rows_)
if threshold is not None:
sensitivity[abs(sensitivity) < threshold] = 0
return pd.DataFrame(sensitivity, index=index, columns=columns)
def _gls_constrained_sensitivity(self, S, rows=None,
threshold=None):
"""
Method to obtain sensitivity according to constrained Least-Squares:
.. math::
$$
\left(S\cdot V_{x_{prior}}\cdot S.T + V_{y_{extra}}\right)^{-1} \cdot S \cdot V_{x_{prior}}
$$
Parameters
----------
S : 2D iterable
Sensitivity matrix (MXN).
rows : `int`, optional
Option to use row calculation for matrix calculations. This option
defines the number of lines to be taken into account in each loop.
The default is None.
threshold : `int`, optional
threshold to avoid numerical fluctuations. The default is None.
Returns
-------
`pd.DataFrame`
constrained Least-Squares sensitivity.
Notes
-----
..note :: This method is equivalent to `_gls_general_sensitivity`
but for a constrained system
Example
-------
>>> S = np.array([[1, 2], [3, 4]])
>>> cov = CategoryCov.from_var([1, 1])
>>> cov._gls_constrained_sensitivity(S)
0 1
0 -2.00000e+00 1.50000e+00
1 1.00000e+00 -5.00000e-01
>>> cov._gls_constrained_sensitivity(S, rows=1)
0 1
0 -2.00000e+00 1.50000e+00
1 1.00000e+00 -5.00000e-01
"""
# Put the data in an appropriate format
S_ = pd.DataFrame(S)
index = S_.index
columns = S_.columns
G_inv = self._gls_G_inv(S, rows=rows).values
rows_ = S_.shape[0] if rows is None else rows
sensitivity = sparse_tables_dot_multiple([G_inv, S_,
self.data.values],
rows=rows_)
if threshold is not None:
sensitivity[abs(sensitivity) < threshold] = 0
return pd.DataFrame(sensitivity, index=index, columns=columns)
def _gls_cov_sensitivity(self, S, Vy_extra=None,
rows=None, threshold=None):
"""
Method to obtain covariance sensitivity according to GLS:
.. math::
$$
V_{x_{prior}}\cdot S^T \cdot \left(S\cdot V_{x_{prior}}\cdot S.T + V_{y_{extra}}\right)^{-1} \cdot S
$$
Parameters
----------
S : 2D iterable
Sensitivity matrix (MXN).
Vy_extra : 2D iterable
2D covariance matrix for y_extra (MXM).
rows : `int`, optional
Option to use row calculation for matrix calculations. This option
defines the number of lines to be taken into account in each loop.
The default is None.
threshold : `int`, optional
threshold to avoid numerical fluctuations. The default is None.
Returns
-------
`pd.DataFrame`
GLS sensitivity for a given Vy and S.
Example
-------
>>> S = np.array([[1, 2], [3, 4]])
>>> cov = sandy.CategoryCov.from_var([1, 1])
>>> Vy = np.diag(pd.Series([1, 1]))
>>> cov._gls_cov_sensitivity(S, Vy)
0 1
0 4.00000e-01 4.00000e-01
1 4.00000e-01 6.85714e-01
>>> S = pd.DataFrame([[1, 2], [3, 4]], index=[1, 2],columns=[3, 4])
>>> cov = sandy.CategoryCov.from_var([1, 1])
>>> Vy = pd.DataFrame([[1, 0], [0, 1]], index=[1, 2], columns=[1, 2])
>>> cov._gls_cov_sensitivity(S, Vy)
3 4
3 4.00000e-01 4.00000e-01
4 4.00000e-01 6.85714e-01
>>> cov._gls_cov_sensitivity(S, Vy, rows=1)
3 4
3 4.00000e-01 4.00000e-01
4 4.00000e-01 6.85714e-01
"""
index = columns = pd.DataFrame(S).columns
S_ = pd.DataFrame(S).values
general_sens = self._gls_general_sensitivity(S, Vy_extra=Vy_extra,
rows=rows,
threshold=threshold).values
rows_ = S_.shape[0] if rows is None else rows
cov_sens = sparse_tables_dot(general_sens, S_, rows=rows_).toarray()
if threshold is not None:
cov_sens[abs(cov_sens) < threshold] = 0
return pd.DataFrame(cov_sens, index=index, columns=columns)
def gls_update(self, S, Vy_extra=None, rows=None,
threshold=None):
"""
Perform GLS update for a given variance and sensitivity:
.. math::
$$
V_{x_{post}} = V_{x_{prior}} - V_{x_{prior}}\cdot S.T \cdot \left(S\cdot V_{x_{prior}}\cdot S.T + V_{y_{extra}}\right)^{-1} \cdot S \cdot V_{x_{prior}}
$$
Parameters
----------
Vy_extra : 2D iterable, optional
2D covariance matrix for y_extra (MXM).
S : 2D iterable
Sensitivity matrix (MXN).
rows : `int`, optional
Option to use row calculation for matrix calculations. This option
defines the number of lines to be taken into account in each loop.
The default is None.
threshold : `int`, optional
Threshold to avoid numerical fluctuations. The default is None.
Returns
-------
`CategoryCov`
GLS method applied to a CategoryCov object for a given Vy and S.
Example
-------
>>> S = np.array([[1, 2], [3, 4]])
>>> cov = sandy.CategoryCov.from_var([1, 1])
>>> Vy = np.diag(pd.Series([1, 1]))
>>> cov.gls_update(S, Vy)
0 1
0 6.00000e-01 -4.00000e-01
1 -4.00000e-01 3.14286e-01
>>> cov.gls_update(S, Vy, rows=1)
0 1
0 6.00000e-01 -4.00000e-01
1 -4.00000e-01 3.14286e-01
"""
index, columns = self.data.index, self.data.columns
A = self._gls_cov_sensitivity(S, Vy_extra=Vy_extra,
rows=rows, threshold=threshold).values
rows_ = self.data.shape[0] if rows is None else rows
Vx_prior = self.to_sparse(method='csc_matrix')
diff = sparse_tables_dot(A, Vx_prior, rows=rows_)
# gls update
Vx_post = Vx_prior - diff
Vx_post = Vx_post.toarray()
if threshold is not None:
Vx_post[abs(Vx_post) < threshold] = 0
return self.__class__(pd.DataFrame(Vx_post, index=index, columns=columns))
def constrained_gls_update(self, S, rows=None,
threshold=None):
"""
Perform constrained Least-Squares update for a given sensitivity:
.. math::
$$
V_{x_{post}} = V_{x_{prior}} - V_{x_{prior}}\cdot S.T \cdot \left(S\cdot V_{x_{prior}}\cdot S.T\right)^{-1} \cdot S \cdot V_{x_{prior}}
$$
Parameters
----------
S : 2D iterable
Sensitivity matrix (MXN).
rows : `int`, optional
Option to use row calculation for matrix calculations. This option
defines the number of lines to be taken into account in each loop.
The default is None.
threshold : `int`, optional
Threshold to avoid numerical fluctuations. The default is None.
Returns
-------
`CategoryCov`
Constrained least-squares method applied to a CategoryCov object
for a given S.
Notes
-----
..note :: This method is equivalent to `gls_update` but for a
constrained system
Example
-------
>>> S = np.array([[1, 2], [3, 4]])
>>> cov = sandy.CategoryCov.from_var([1, 1])
>>> cov_update = cov.constrained_gls_update(S).data.round(decimals=6)
>>> assert np.amax(cov_update.values) == 0.0
>>> cov_update = cov.constrained_gls_update(S, rows=1).data.round(decimals=6)
>>> assert np.amax(cov_update.values) == 0.0
"""
return self.gls_update(S, Vy_extra=None, rows=rows, threshold=threshold)
def sandwich(self, s, rows=None, threshold=None):
"""
Apply the sandwich formula to the CategoryCov object for a given
pandas.Series.
Parameters
----------
s : 1D or 2D iterable
General sensitivities.
rows : `int`, optional
Option to use row calculation for matrix calculations. This option
defines the number of lines to be taken into account in each loop.
The default is None.
threshold : `int`, optional
Threshold to avoid numerical fluctuations. The default is None.
Returns
-------
`float` (if s is 1D iterable)
The resulting scalar number after having applied the sandwich
formula for a given 1D iterable.
`CategoryCov` (if s is 2D iterable)
`CategoryCov` object to which we have applied sandwich
formula for a given 2D iterable.
Warnings
--------
The `CategoryCov` object and the sensitivity (S) must have the same
indices.
Examples
--------
>>> var = np.array([1, 2, 3])
>>> s = pd.Series([1, 2, 3])
>>> cov = sandy.CategoryCov.from_var(var)
>>> cov.sandwich(s)
36.0
>>> s = np.array([1, 2, 3])
>>> var = pd.Series([1, 2, 3])
>>> cov = sandy.CategoryCov.from_var(var)
>>> var = sandy.CategoryCov.from_var(s).data
>>> cov.sandwich(var)
0 1 2
0 1.00000e+00 0.00000e+00 0.00000e+00
1 0.00000e+00 8.00000e+00 0.00000e+00
2 0.00000e+00 0.00000e+00 2.70000e+01
"""
if pd.DataFrame(s).shape[1] == 1:
s_ = pd.Series(s)
sandwich = s_.dot(self.data.dot(s_.T))
# sandwich variable is a scalar
return sandwich
else:
s_ = pd.DataFrame(s).T
sandwich = self._gls_Vy_calc(s_, rows=rows)
if threshold is not None:
sandwich[sandwich < threshold] = 0
return self.__class__(sandwich)
def corr2cov(self, std):
"""
Produce covariance matrix given correlation matrix and standard
deviation array.
Same as :obj: `corr2cov` but it works with :obj: `CategoryCov`
instances.
Parameters
----------
std : 1d iterable
array of standard deviations
Returns
-------
:obj: `CategoryCov`
covariance matrix
Examples
--------
Initialize index and columns
>>> idx = ["A", "B", "C"]
>>> std = np.array([1, 2, 3])
>>> corr = sandy.CategoryCov([[1, 0, 2], [0, 3, 0], [2, 0, 1]], index=idx, columns=idx)
>>> corr.corr2cov(std)
A B C
A 1.00000e+00 0.00000e+00 6.00000e+00
B 0.00000e+00 1.20000e+01 0.00000e+00
C 6.00000e+00 0.00000e+00 9.00000e+00
"""
cov = corr2cov(self.data, std)
index = self.data.index
columns = self.data.columns
return self.__class__(cov, index=index, columns=columns)
@classmethod
@cov33csv
def from_csv(cls, file, **kwargs):
"""
Read covariance matrix from csv file using `pandas.read_csv`.
Parameters
----------
file: `str`
csv file containing covariance matrix (with or w/o indices and
columns)
kwargs: `dict`
keyword arguments to pass to `pd.read_csv`
Returns
-------
`CategoryCov`
object containing covariance matrix
Examples
--------
Read a 2x2 matrix from a string in csv format.
>>> from io import StringIO
>>> cov = pd.DataFrame([[1, 0.4],[0.4, 1]])
>>> string = StringIO(cov.to_csv())
>>> sandy.CategoryCov.from_csv(string, index_col=0)
0 1
0 1.00000e+00 4.00000e-01
1 4.00000e-01 1.00000e+00
Now use `pandas.MultiIndex` as `index` and `columns`.
This example represents the case of a cross section covariance matrix
for `MAT=9437`, `MT=18` and two energy points `[1e-5, 1e6]`.
>>> tuples = [(9437, 18, 1e-5), (9437, 18, 1e6)]
>>> index = pd.MultiIndex.from_tuples(tuples, names=("MAT", "MT", "E"))
>>> cov.index = cov.columns = index
>>> string = StringIO(cov.to_csv())
>>> pos = [0, 1, 2]
>>> sandy.CategoryCov.from_csv(string, index_col=pos, header=pos)
MAT 9437
MT 18
E 1e-05 1000000.0
MAT MT E
9437 18 1.00000e-05 1.00000e+00 4.00000e-01
1.00000e+06 4.00000e-01 1.00000e+00
"""
df = pd.read_csv(file, **kwargs)
return cls(df)
@classmethod
def random_corr(cls, size, correlations=True, seed=None, **kwargs):
"""
>>> sandy.CategoryCov.random_corr(2, seed=1)
0 1
0 1.00000e+00 4.40649e-01
1 4.40649e-01 1.00000e+00
>>> sandy.CategoryCov.random_corr(2, correlations=False, seed=1)
0 1
0 1.00000e+00 0.00000e+00
1 0.00000e+00 1.00000e+00
"""
np.random.seed(seed=seed)
corr = np.eye(size)
if correlations:
offdiag = np.random.uniform(-1, 1, size**2).reshape(size, size)
up = np.triu(offdiag, 1)
else:
up = np.zeros([size, size])
corr += up + up.T
return cls(corr, **kwargs)
@classmethod
def random_cov(cls, size, stdmin=0.0, stdmax=1.0, correlations=True,
seed=None, **kwargs):
"""
Construct a covariance matrix with random values
Parameters
----------
size : `int`
Dimension of the original matrix
stdmin : `float`, default is 0
minimum value of the uniform standard deviation vector
stdmax : `float`, default is 1
maximum value of the uniform standard deviation vector
correlations : `bool`, default is True
flag to insert the random correlations in the covariance matrix
seed : `int`, optional, default is `None`
seed for the random number generator (by default use `numpy`
default pseudo-random number generator)
Returns
-------
`CategoryCov`
object containing covariance matrix
Examples
--------
>>> sandy.CategoryCov.random_cov(2, seed=1)
0 1
0 2.15373e-02 5.97134e-03
1 5.97134e-03 8.52642e-03
"""
corr = random_corr(size, correlations=correlations, seed=seed)
std = np.random.uniform(stdmin, stdmax, size)
return CategoryCov(corr).corr2cov(std)
def to_sparse(self, method='csr_matrix'):
"""
Method to extract `CategoryCov` values into a sparse matrix
Parameters
----------
method : `str`, optional
SciPy 2-D sparse matrix. The default is 'csr_matrix'.
Methods
-------
`csr_matrix`:
Compressed Sparse Row matrix.
`bsr_matrix`:
Block Sparse Row matrix.
`coo_matrix`:
A sparse matrix in COOrdinate format.
`csc_matrix`:
Compressed Sparse Column matrix.
`dia_matrix`:
Sparse matrix with DIAgonal storage.
`dok_matrix`:
Dictionary Of Keys based sparse matrix.
`lil_matrix`:
Row-based list of lists sparse matrix.
Returns
-------
data_sp : `scipy.sparse.matrix`
`CategoryCov` instance values stored as a sparse matrix
"""
data = self.data.values
if method == 'csr_matrix':
data_sp = sps.csr_matrix(data)
elif method == 'bsr_matrix':
data_sp = sps.bsr_matrix(data)
elif method == 'coo_matrix':
data_sp = sps.coo_matrix(data)
elif method == 'csc_matrix':
data_sp = sps.csc_matrix(data)
elif method == 'dia_matrix':
data_sp = sps.dia_matrix(data)
elif method == 'dok_matrix':
data_sp = sps.dok_matrix(data)
elif method == 'lil_matrix':
data_sp = sps.lil_matrix(data)
else:
raise ValueError('The method does not exist in scipy.sparse')
return data_sp
def get_L(self, rows=None, tolerance=None):
"""
Extract lower triangular matrix `L` for which `L*L^T == self`.
Parameters
----------
rows : `int`, optional
Option to use row calculation for matrix calculations. This option
defines the number of lines to be taken into account in each loop.
The default is None.
tolerance : `float`, optional, default is `None`
replace all eigenvalues smaller than a given tolerance with zeros.
Returns
-------
`pandas.DataFrame`
Cholesky decomposition lower triangular matrix.
Examples
--------
Positive definite matrix:
>>> a = np.array([[4, 12, -16], [12, 37, -43], [-16, -43, 98]])
>>> sandy.CategoryCov(a).get_L()
0 1 2
0 -2.00000e+00 0.00000e+00 0.00000e+00
1 -6.00000e+00 1.00000e+00 0.00000e+00
2 8.00000e+00 5.00000e+00 3.00000e+00
>>> sandy.CategoryCov(a).get_L(tolerance=0)
0 1 2
0 -2.00000e+00 0.00000e+00 0.00000e+00
1 -6.00000e+00 1.00000e+00 0.00000e+00
2 8.00000e+00 5.00000e+00 3.00000e+00
>>> sandy.CategoryCov(a).get_L(rows=1)
0 1 2
0 -2.00000e+00 0.00000e+00 0.00000e+00
1 -6.00000e+00 1.00000e+00 0.00000e+00
2 8.00000e+00 5.00000e+00 3.00000e+00
Matrix with negative eigenvalues
>>> sandy.CategoryCov([[1, -2],[-2, 3]]).get_L(rows=1, tolerance=0)
0 1
0 -1.08204e+00 0.00000e+00
1 1.75078e+00 0.00000e+00
>>> sandy.CategoryCov([[1, -2],[-2, 3]]).get_L(tolerance=0)
0 1
0 -1.08204e+00 0.00000e+00
1 1.75078e+00 0.00000e+00
Decomposition test:
>>> L = sandy.CategoryCov(a).get_L()
>>> L.dot(L.T)
0 1 2
0 4.00000e+00 1.20000e+01 -1.60000e+01
1 1.20000e+01 3.70000e+01 -4.30000e+01
2 -1.60000e+01 -4.30000e+01 9.80000e+01
Matrix with negative eigenvalues, tolerance of 0:
>>> L = sandy.CategoryCov([[1, -2],[-2, 3]]).get_L(rows=1, tolerance=0)
>>> L.dot(L.T)
0 1
0 1.17082e+00 -1.89443e+00
1 -1.89443e+00 3.06525e+00
"""
index = self.data.index
columns = self.data.columns
# Reduces the size of the matrix, erasing the zero values
nonzero_idxs, cov_reduced = reduce_size(self.data)
# Obtain the eigenvalues and eigenvectors:
E, V = sandy.CategoryCov(cov_reduced).get_eig(tolerance=tolerance)
E = sps.diags(np.sqrt(E)).toarray()
# Construct the matrix:
rows_ = cov_reduced.shape[0] if rows is None else rows
A = sandy.cov.sparse_tables_dot(V, E, rows=rows_).T.toarray()
# QR decomposition:
Q, R = scipy.linalg.qr(A)
L_redu = R.T
# Original size
L = restore_size(nonzero_idxs, L_redu, len(self.data)).values
return pd.DataFrame(L, index=index, columns=columns)
class EnergyCov(CategoryCov):
"""
Dataframe for a multigroup covariance matrix.
.. note:: It is assumed that the covariance matrix is defined over
multi-group energy grids.
Only 'zero' interpolation is supported.
Attributes
----------
data : `pandas.DataFrame`
covariance matrix as a dataframe
Methods
-------
add
change_grid
from_lb1
from_lb2
from_lb5_sym
from_lb5_asym
from_lb6
sum_covs
Raises
------
`sandy.Error`
if index values are not monotonically increasing
`sandy.Error`
if columns values are not monotonically increasing
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def data(self):
"""
Covariance matrix as a dataframe.
Attributes
----------
index : `pandas.Index`
indices
columns : `pandas.Index`
columns
values : `numpy.array`
covariance values as `float`
Returns
-------
`pandas.DataFrame`
covariance matrix
Raises
------
`sandy.Error`
if `index` or `columns` are not monotonically increasing
"""
return self._data
@data.setter
def data(self, data):
self._data = pd.DataFrame(data)
self._data.index = pd.Index(
self._data.index.values,
name="E",
)
self._data.columns = pd.Index(
self._data.columns.values,
name="E",
)
if not self._data.index.is_monotonic_increasing:
raise sandy.Error("index values are not monotonically increasing")
if not self._data.columns.is_monotonic_increasing:
raise sandy.Error("columns values are not monotonically "
"increasing")
def change_grid(self, ex, ey, inplace=False):
"""
Given one energy grid for the x-axis and one energy grid for the
y-axis, interpolate/extrapolate the covariance matrix over the new
points using the *forward-filling* method.
.. important::
* backward extrapolated values (e.g. below threshold) are replaced
by 0
* forward extrapolated values (e.g. above 20 MeV) are replaced by
the covariance coefficient that refers to the last point in the
original grid
Parameters
----------
ex : `iterable`
energy grid for the x-axis
ey : `iterable`
energy grid for the y-axis
Returns
-------
`sandy.EnergyCov`
Covariance matrix interpolated over the new axes.
Examples
--------
>>> eg = [1e-2, 1e6]
>>> C = sandy.EnergyCov.random_corr(2, seed=1, index=eg, columns=eg)
>>> C.change_grid([0, 1, 1e6, 1e7], [0, 1, 1e6, 1e7])
E 0.00000e+00 1.00000e+00 1.00000e+06 1.00000e+07
E
0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00
1.00000e+00 0.00000e+00 1.00000e+00 4.40649e-01 4.40649e-01
1.00000e+06 0.00000e+00 4.40649e-01 1.00000e+00 1.00000e+00
1.00000e+07 0.00000e+00 4.40649e-01 1.00000e+00 1.00000e+00
"""
df = self.data.reindex(index=ex, method="ffill") \
.reindex(columns=ey, method="ffill") \
.fillna(0)
if not inplace:
return self.__class__(df)
self.data = df
def _plot_matrix(self, ax, xscale='log', yscale='log', cmap='bwr',
vmin=-1, vmax=1, emin=1e-5, emax=2e7, **kwargs):
new_xgrid = np.unique([*self.data.index, *[emin, emax]])
new_ygrid = np.unique([*self.data.columns, *[emin, emax]])
data = self.change_grid(ex=new_xgrid, ey=new_ygrid).data
X, Y = np.meshgrid(data.index.values, data.columns.values)
qmesh = ax.pcolormesh(
X.T,
Y.T,
data.values,
cmap=cmap,
vmin=vmin,
vmax=vmax,
**kwargs,
)
ax.set_xlim([emin, emax])
ax.set_ylim([emin, emax])
plt.colorbar(qmesh)
ax.set_xscale(xscale)
ax.set_yscale(yscale)
return ax
def add(self, cov, inplace=False):
"""
Add the content of another `EnergyCov` (sum).
If the energy grids do not match, interpolate.
Parameters
----------
cov : `sandy.EnergyCov`
multigroup covariance matrices (axes can be different)
inplace : `bool`, optional, default is `False`
flag to operate **inplace**
Returns
-------
`sandy.EnergyCov`
Multi-group covariance matrix.
Examples
--------
>>> eg = [1e-2, 1e6]
>>> C = sandy.EnergyCov.random_corr(2, seed=1, index=eg, columns=eg)
>>> C.add(C)
E 1.00000e-02 1.00000e+06
E
1.00000e-02 2.00000e+00 8.81298e-01
1.00000e+06 8.81298e-01 2.00000e+00
>>> eg = [1e-1, 1]
>>> D = sandy.EnergyCov.random_corr(2, seed=5, index=eg, columns=eg)
>>> C.add(D)
E 1.00000e-02 1.00000e-01 1.00000e+00 1.00000e+06
E
1.00000e-02 1.00000e+00 1.00000e+00 1.00000e+00 4.40649e-01
1.00000e-01 1.00000e+00 2.00000e+00 1.74146e+00 1.18211e+00
1.00000e+00 1.00000e+00 1.74146e+00 2.00000e+00 1.44065e+00
1.00000e+06 4.40649e-01 1.18211e+00 1.44065e+00 2.00000e+00
>>> assert C.add(D).data.equals(D.add(C).data)
"""
ex = np.unique([*self.data.index, *cov.data.index])
ey = np.unique([*self.data.columns, *cov.data.columns])
x = self.change_grid(ex, ey)
y = cov.change_grid(ex, ey)
data = x.data.add(y.data)
if inplace:
self.data = data
else:
return self.__class__(data)
@classmethod
def sum_covs(cls, *covs):
"""
Sum multigroup covariance matrices into a single one.
Parameters
----------
covs : iterable of `sandy.EnergyCov`
list of multigroup covariance matrices (axes can be different)
Returns
-------
`sandy.EnergyCov`
Multi-group covariance matrix.
Examples
--------
Sum two 2x2 correlation matrices with different indices and columns
>>> eg = [1e-2, 1e6]
>>> C = sandy.EnergyCov.random_corr(2, seed=1, index=eg, columns=eg)
>>> eg = [1e-1, 1]
>>> D = sandy.EnergyCov.random_corr(2, seed=5, index=eg, columns=eg)
>>> sandy.EnergyCov.sum_covs(C, D)
E 1.00000e-02 1.00000e-01 1.00000e+00 1.00000e+06
E
1.00000e-02 1.00000e+00 1.00000e+00 1.00000e+00 4.40649e-01
1.00000e-01 1.00000e+00 2.00000e+00 1.74146e+00 1.18211e+00
1.00000e+00 1.00000e+00 1.74146e+00 2.00000e+00 1.44065e+00
1.00000e+06 4.40649e-01 1.18211e+00 1.44065e+00 2.00000e+00
"""
return functools.reduce(lambda x, y: x.add(y), covs)
@classmethod
def from_lb1(cls, evalues, fvalues):
"""Extract square covariance matrix from NI-type sub-subsection data
with flag `lb=1`.
Parameters
----------
evalues : iterable
covariance energy grid (same for both axes)
fvalues : iterable
array of F-values (covariance matrix diagonal)
Returns
-------
`sandy.EnergyCov`
Multi-group covariance matrix.
"""
cov = np.diag(fvalues)
return cls(cov, index=evalues, columns=evalues)
@classmethod
def from_lb2(cls, evalues, fvalues):
"""Extract square covariance matrix from NI-type sub-subsection data
with flag `lb=2`.
Parameters
----------
evalues : `iterable`
covariance energy grid for both axes
fvalues : `iterable`
array of F-values
Returns
-------
`sandy.formats.utils.EnergyCov`
Multi-group covariance matrix.
"""
f = np.array(fvalues)
cov = f*f.reshape(-1,1)
return cls(cov, index=evalues, columns=evalues)
@classmethod
def from_lb5_sym(cls, evalues, fvalues):
"""Extract square symmetric covariance matrix from NI-type sub-subsection data
with flag `lb=5`.
Parameters
----------
evalues : `iterable`
covariance energy grid for both axes
fvalues : `iterable`
array of F-values (flattened upper triangular matrix coefficients)
Returns
-------
`sandy.formats.utils.EnergyCov`
Multi-group covariance matrix.
"""
ne = len(evalues)
cov = np.zeros([ne - 1, ne - 1])
indices = np.triu_indices(ne - 1)
cov[indices] = np.array(fvalues)
cov += np.triu(cov, 1).T
# add zero row and column at the end of the matrix
cov = np.insert(cov, cov.shape[0], [0]*cov.shape[1], axis=0)
cov = np.insert(cov, cov.shape[1], [0]*cov.shape[0], axis=1)
return cls(cov, index=evalues, columns=evalues)
@classmethod
def from_lb5_asym(cls, evalues, fvalues):
"""
Extract square asymmetric covariance matrix from NI-type sub-subsection data
with flag `lb=5`.
Parameters
----------
evalues : `iterable`
covariance energy grid for both axes
fvalues : `iterable`
array of F-values (flattened full matrix)
Returns
-------
`sandy.formats.utils.EnergyCov`
Multi-group covariance matrix.
"""
ne = len(evalues)
cov = np.array(fvalues).reshape(ne - 1, ne - 1)
# add zero row and column at the end of the matrix
cov = np.insert(cov, cov.shape[0], [0]*cov.shape[1], axis=0)
cov = np.insert(cov, cov.shape[1], [0]*cov.shape[0], axis=1)
return cls(cov, index=evalues, columns=evalues)
@classmethod
def from_lb6(cls, evalues_r, evalues_c, fvalues):
"""Extract covariance matrix from NI-type sub-subsection data
        with flag `lb=6`.
Parameters
----------
evalues_r : `iterable`
covariance energy grid for row axis
evalues_c : `iterable`
covariance energy grid for column axis
fvalues : `iterable`
array of F-values (flattened full matrix)
Returns
-------
`sandy.formats.utils.EnergyCov`
Multi-group covariance matrix.
"""
ner = len(evalues_r)
nec = len(evalues_c)
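        # lb=6: F-values are reshaped row-wise into a (ner-1) x (nec-1) rectangular matrix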
cov = np.array(fvalues).reshape(ner-1, nec-1)
# add zero row and column at the end of the matrix
cov = np.insert(cov, cov.shape[0], [0]*cov.shape[1], axis=0)
cov = np.insert(cov, cov.shape[1], [0]*cov.shape[0], axis=1)
return cls(cov, index=evalues_r, columns=evalues_c)
class GlobalCov(CategoryCov):
@classmethod
def from_list(cls, iterable):
"""
Extract global cross section/nubar covariance matrix from iterables
of `EnergyCovs`.
Parameters
----------
iterable : iterable
list of tuples/lists/iterables with content `[mat, mt, mat1, mt1, EnergyCov]`
Returns
-------
`XsCov` or `pandas.DataFrame`
global cross section/nubar covariance matrix (empty dataframe if no covariance matrix was found)
"""
columns = ["KEYS_ROWS", "KEYS_COLS", "COV"]
# Reindex the cross-reaction matrices
covs = | pd.DataFrame.from_records(iterable) | pandas.DataFrame.from_records |
#!/usr/bin/env python
# coding: utf-8
# In[133]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from gluonts.model.deepar import DeepAREstimator
from gluonts.trainer import Trainer
from gluonts.model.forecast import SampleForecast
# In[134]:
df=pd.read_csv('data_share.csv', sep=',', index_col = 0, parse_dates=True)
# In[135]:
df['y'] = pd.to_numeric(df["y"], downcast="float")
# In[ ]:
# In[136]:
df
# In[137]:
df.T.iloc[:, 234:247]
# In[ ]:
# In[138]:
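# drop the original index, transpose so the former columns become rows, and move their labels into an 'index' column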
df_input = df.reset_index(drop=True).T.reset_index()
# In[139]:
df_input.iloc[:, 233]
ts_code=df_input['index'].astype('category').cat.codes.values
ts_code.reshape(-1,1)
df_test1 = df_input.iloc[:, 235:247]
df_test1
# In[140]:
df_training = df_input.iloc[:, 1:233].values
df_test = df_input.iloc[:, 234:247].values
df_training
# In[141]:
freq='3M'
start_train = pd.Timestamp('1959-09-01', freq=freq)
start_test = pd.Timestamp('2018-03-01', freq=freq)
future_forecast = | pd.Timestamp('') | pandas.Timestamp |
from copy import deepcopy
import os
from numpy import array
from numpy import nan
import numpy as np
import pandas as pd
from pandas.util.testing import assert_frame_equal
from pandas.util.testing import assert_panel_equal
import pytest
import cdpybio as cpb
# Note: I use pos and neg in this file to refer to the plus and minus strands
# respectively.
# TODO: I might want to include some more tests. I haven't tested whether the
# stats in the statsfiles are correct. I might want to check to make sure the
# results aren't sensitive to strand. I could also test the define_sample_name
# functionality.
def add_root(fn):
return os.path.join(cpb._root, 'tests', 'star', fn)
SJ_NEG_NEW_A = add_root('SJ.out.tab.neg_new_a')
SJ_NEG_NEW_B = add_root('SJ.out.tab.neg_new_b')
SJ_NEG_NONEW_A = add_root('SJ.out.tab.neg_nonew_a')
SJ_NEG_NONEW_B = add_root('SJ.out.tab.neg_nonew_b')
SJ_NEW = add_root('SJ.out.tab.new')
SJ_NEW_A = add_root('SJ.out.tab.new_a')
SJ_NEW_B = add_root('SJ.out.tab.new_b')
SJ_NONEW_A = add_root('SJ.out.tab.nonew_a')
SJ_NONEW_B = add_root('SJ.out.tab.nonew_b')
SJ_UNK_NONEW_A = add_root('SJ.out.tab.unk_nonew_a')
EXT = add_root('ext.tsv')
class TestMisc:
def test_read_ext(self):
vals = [['gene1', 'chr1', 10, 20, '+', 'chr1:10', 'chr1:20',
'chr1:10:+', 'chr1:20:+', 'chr1:10-20'],
['gene1', 'chr1', 5, 25, '+', 'chr1:5', 'chr1:25', 'chr1:5:+',
'chr1:25:+', 'chr1:5-25'],
['gene1', 'chr1', 2, 20, '+', 'chr1:2', 'chr1:20', 'chr1:2:+',
'chr1:20:+', 'chr1:2-20'],
['gene1', 'chr1', 5, 20, '+', 'chr1:5', 'chr1:20', 'chr1:5:+',
'chr1:20:+', 'chr1:5-20'],
['gene2', 'chr2', 10, 20, '-', 'chr2:10', 'chr2:20',
'chr2:20:-', 'chr2:10:-', 'chr2:10-20'],
['gene2', 'chr2', 5, 25, '-', 'chr2:5', 'chr2:25', 'chr2:25:-',
'chr2:5:-', 'chr2:5-25'],
['gene2', 'chr2', 2, 20, '-', 'chr2:2', 'chr2:20', 'chr2:20:-',
'chr2:2:-', 'chr2:2-20'],
['gene2', 'chr2', 5, 20, '-', 'chr2:5', 'chr2:20', 'chr2:20:-',
'chr2:5:-', 'chr2:5-20']]
ind = [u'chr1:10-20:+', u'chr1:5-25:+', u'chr1:2-20:+', u'chr1:5-20:+',
u'chr2:10-20:-', u'chr2:5-25:-', u'chr2:2-20:-', u'chr2:5-20:-']
cols=['gene', 'chrom', 'start', 'end', 'strand', 'chrom:start',
'chrom:end', 'donor', 'acceptor', 'intron']
df = pd.DataFrame(vals, index=ind, columns=cols)
df2, stats = cpb.star.read_external_annotation(EXT)
assert_frame_equal(df, df2)
def test_read_sj_out_pos(self):
vals = [['chr1', 2, 20, '+', 'GT/AG', True, 5, 1, 10],
['chr1', 5, 20, '+', 'GT/AG', True, 20, 1, 14],
['chr1', 5, 25, '+', 'CT/AC', True, 10, 1, 7],
['chr1', 10, 20, '+', 'CT/AC', True, 20, 1, 7]]
cols = [u'chrom', u'start', u'end', u'strand', u'intron_motif',
u'annotated', u'unique_junction_reads',
u'multimap_junction_reads', u'max_overhang']
df = pd.DataFrame(vals, columns=cols)
df2 = cpb.star.read_sj_out_tab(SJ_NONEW_A)
assert_frame_equal(df, df2)
def test_read_sj_out_neg(self):
vals = [['chr2', 2, 20, '-', 'GT/AG', True, 5, 1, 10],
['chr2', 5, 20, '-', 'GT/AG', True, 20, 1, 14],
['chr2', 5, 25, '-', 'CT/AC', True, 10, 1, 7],
['chr2', 10, 20, '-', 'CT/AC', True, 20, 1, 7]]
cols = [u'chrom', u'start', u'end', u'strand', u'intron_motif',
u'annotated', u'unique_junction_reads',
u'multimap_junction_reads', u'max_overhang']
df = pd.DataFrame(vals, columns=cols)
df2 = cpb.star.read_sj_out_tab(SJ_NEG_NONEW_A)
assert_frame_equal(df, df2)
def test_read_sj_out_unk(self):
df = pd.DataFrame([['chr3', 2, 20, 'unk', 'GT/AG', True, 5, 1, 10],
['chr3', 5, 20, 'unk', 'GT/AG', True, 20, 1, 14],
['chr3', 5, 25, 'unk', 'CT/AC', True, 10, 1, 7],
['chr3', 10, 20, 'unk', 'CT/AC', True, 20, 1, 7]],
columns=[u'chrom', u'start',
u'end', u'strand',
u'intron_motif', u'annotated',
u'unique_junction_reads',
u'multimap_junction_reads', u'max_overhang'])
df2 = cpb.star.read_sj_out_tab(SJ_UNK_NONEW_A)
assert_frame_equal(df, df2)
# TODO: I'm running into some kind of error when I compare the dataframes. I
# see some rumbling that there may be some numpy/pandas difficulties so it
# might not be my problem.
# def test_read_log(self):
# ind = [u'Started job on', u'Started mapping on', u'Finished on',
# u'Mapping speed, Million of reads per hour',
# u'Number of input reads', u'Average input read length',
# u'Uniquely mapped reads number', u'Uniquely mapped reads %',
# u'Average mapped length', u'Number of splices: Total',
# u'Number of splices: Annotated (sjdb)',
# u'Number of splices: GT/AG', u'Number of splices: GC/AG',
# u'Number of splices: AT/AC', u'Number of splices: Non-canonical',
# u'Mismatch rate per base, %', u'Deletion rate per base',
# u'Deletion average length', u'Insertion rate per base',
# u'Insertion average length',
# u'Number of reads mapped to multiple loci',
# u'% of reads mapped to multiple loci',
# u'Number of reads mapped to too many loci',
# u'% of reads mapped to too many loci',
# u'% of reads unmapped: too many mismatches',
# u'% of reads unmapped: too short', u'% of reads unmapped: other']
# cols = [add_root('Log.final.out.a')]
# vals= [['Mar 06 17:38:15'], ['Mar 06 17:53:05'], ['Mar 06 20:13:16'],
# ['62.51'], ['146042756'], ['135'], ['103778365'], ['71.06%'],
# ['119.74'], ['37420413'], ['35853326'], ['36980144'], ['351650'],
# ['17910'], ['70709'], ['1.13%'], ['0.01%'], ['1.51'], ['0.01%'],
# ['1.29'], ['42173939'], ['28.88%'], ['536'], ['0.00%'],
# ['0.00%'], ['0.00%'], ['0.06%']]
# df = pd.DataFrame(vals, index=ind, columns=cols)
# df2 = cpb.star._read_log(add_root('Log.final.out.a'))
# assert_frame_equal(df, df2)
# TODO: I'm running into some kind of error when I compare the dataframes. I
# see some rumbling that there may be some numpy/pandas difficulties so it
# might not be my problem.
# def test_make_logs_df(self):
# cols = [u'Started job on', u'Started mapping on', u'Finished on',
# u'Mapping speed, Million of reads per hour',
# u'Number of input reads', u'Average input read length',
# u'Uniquely mapped reads number', u'Uniquely mapped reads %',
# u'Average mapped length', u'Number of splices: Total',
# u'Number of splices: Annotated (sjdb)',
# u'Number of splices: GT/AG', u'Number of splices: GC/AG',
# u'Number of splices: AT/AC', u'Number of splices: Non-canonical',
# u'Mismatch rate per base, %', u'Deletion rate per base',
# u'Deletion average length', u'Insertion rate per base',
# u'Insertion average length',
# u'Number of reads mapped to multiple loci',
# u'% of reads mapped to multiple loci',
# u'Number of reads mapped to too many loci',
# u'% of reads mapped to too many loci',
# u'% of reads unmapped: too many mismatches',
# u'% of reads unmapped: too short', u'% of reads unmapped: other']
# ind = [add_root(x) for x in ['Log.final.out.a', u'Log.final.out.b']]
# vals = [['Mar 06 17:38:15', 'Mar 06 17:53:05', 'Mar 06 20:13:16', 62.51,
# 146042756.0, 135.0, 103778365.0, 71.06, 119.74, 37420413.0,
# '35853326', 36980144.0, 351650.0, 17910.0, 70709.0, 1.13, 0.01,
# '1.51', 0.01, '1.29', 42173939.0, 28.88, 536.0, 0.0, 0.0, 0.0,
# 0.06],
# ['Mar 04 19:39:13', 'Mar 04 19:49:11', 'Mar 04 21:13:01', 84.92,
# 118648978.0, 136.0, 105411961.0, 88.84, 132.3, 30047584.0,
# '29100214', 29616122.0, 351932.0, 21726.0, 57804.0, 0.69, 0.01,
# '1.51', 0.01, '1.25', 13141675.0, 11.08, 951.0, 0.0, 0.0, 0.0,
# 0.08]]
# df = pd.DataFrame(vals, index=ind, columns=cols)
# df2 = cpb.star.make_logs_df(
# [add_root(x) for x in ['Log.final.out.a', 'Log.final.out.b']])
# assert_frame_equal(df, df2)
class TestMakeSJOutDict:
def test_make_sj_out_dict_pos(self):
d = cpb.star._make_sj_out_dict([SJ_NONEW_A,
SJ_NONEW_B])
a = cpb.star.read_sj_out_tab(SJ_NONEW_A)
a.index = a.apply(lambda x: cpb.star._sj_out_junction(x), axis=1)
b = cpb.star.read_sj_out_tab(SJ_NONEW_B)
b.index = b.apply(lambda x: cpb.star._sj_out_junction(x), axis=1)
assert_frame_equal(a, d[SJ_NONEW_A])
assert_frame_equal(b, d[SJ_NONEW_B])
def test_make_sj_out_dict_neg(self):
d = cpb.star._make_sj_out_dict([SJ_NEG_NONEW_A,
SJ_NEG_NONEW_B])
a = cpb.star.read_sj_out_tab(SJ_NEG_NONEW_A)
a.index = a.apply(lambda x: cpb.star._sj_out_junction(x), axis=1)
b = cpb.star.read_sj_out_tab(SJ_NEG_NONEW_B)
b.index = b.apply(lambda x: cpb.star._sj_out_junction(x), axis=1)
assert_frame_equal(a, d[SJ_NEG_NONEW_A])
assert_frame_equal(b, d[SJ_NEG_NONEW_B])
class TestMakeSJOutPanel:
def test_make_sj_out_panel_pos(self):
ind = [u'chr1:5-20', u'chr1:5-25', u'chr1:10-20']
d = cpb.star._make_sj_out_dict([SJ_NONEW_A,
SJ_NONEW_B])
df = d[SJ_NONEW_A].ix[ind, cpb.star.COUNT_COLS]
df2 = d[SJ_NONEW_B].ix[ind, cpb.star.COUNT_COLS]
df2 = df2.fillna(0)
p = pd.Panel({SJ_NONEW_A:df,
SJ_NONEW_B:df2})
p = p.astype(int)
a = pd.DataFrame([['chr1', 5, 20, '+', 'GT/AG', True],
['chr1', 5, 25, '+', 'CT/AC', True],
['chr1', 10, 20, '+', 'CT/AC', True]],
index=ind,
columns=[u'chrom', u'start',
u'end', u'strand', u'intron_motif',
u'annotated'])
p2, a2 = cpb.star._make_sj_out_panel(d)
assert_frame_equal(a, a2)
assert_panel_equal(p, p2)
def test_make_sj_out_panel_neg(self):
ind = [u'chr2:5-20', u'chr2:5-25', u'chr2:10-20']
d = cpb.star._make_sj_out_dict([SJ_NEG_NONEW_A,
SJ_NEG_NONEW_B])
df = d[SJ_NEG_NONEW_A].ix[ind, cpb.star.COUNT_COLS]
df2 = d[SJ_NEG_NONEW_B].ix[ind, cpb.star.COUNT_COLS]
df2 = df2.fillna(0)
p = pd.Panel({SJ_NEG_NONEW_A:df,
SJ_NEG_NONEW_B:df2})
p = p.astype(int)
a = pd.DataFrame([['chr2', 5, 20, '-', 'GT/AG', True],
['chr2', 5, 25, '-', 'CT/AC', True],
['chr2', 10, 20, '-', 'CT/AC', True]],
index=ind,
columns=[u'chrom', u'start',
u'end', u'strand', u'intron_motif',
u'annotated'])
p2, a2 = cpb.star._make_sj_out_panel(d)
assert_frame_equal(a, a2)
assert_panel_equal(p, p2)
def test_new_junctions_pos(self):
ind = [u'chr1:2-25', u'chr1:3-25', u'chr1:5-20', u'chr1:5-30',
u'chr1:10-20', u'chr1:30-40']
d = cpb.star._make_sj_out_dict([SJ_NONEW_A,
SJ_NEW_A])
df = d[SJ_NONEW_A].ix[ind, cpb.star.COUNT_COLS]
df = df.fillna(0)
df2 = d[SJ_NEW_A].ix[ind, cpb.star.COUNT_COLS]
df2 = df2.fillna(0)
p = pd.Panel({SJ_NONEW_A:df,
SJ_NEW_A:df2})
p = p.astype(int)
a = pd.DataFrame(
[['chr1', 2, 25, '+', 'GT/AG', False],
['chr1', 3, 25, '+', 'CT/AC', False],
['chr1', 5, 20, '+', 'GT/AG', True],
['chr1', 5, 30, '+', 'GT/AG', False],
['chr1', 10, 20, '+', 'CT/AC', True],
['chr1', 30, 40, '+', 'CT/AC', False]],
index=ind,
columns=[u'chrom', u'start', u'end', u'strand',
u'intron_motif', u'annotated']
)
p2, a2 = cpb.star._make_sj_out_panel(d)
assert_frame_equal(a, a2)
assert_panel_equal(p, p2)
def test_new_junctions_neg(self):
ind = [u'chr2:2-25', u'chr2:3-25', u'chr2:5-20', u'chr2:5-30',
u'chr2:10-20', u'chr2:30-40']
d = cpb.star._make_sj_out_dict([SJ_NEG_NONEW_A,
SJ_NEG_NEW_A])
df = d[SJ_NEG_NONEW_A].ix[ind, cpb.star.COUNT_COLS]
df = df.fillna(0)
df2 = d[SJ_NEG_NEW_A].ix[ind, cpb.star.COUNT_COLS]
df2 = df2.fillna(0)
p = pd.Panel({SJ_NEG_NONEW_A:df,
SJ_NEG_NEW_A:df2})
p = p.astype(int)
a = pd.DataFrame(
[['chr2', 2, 25, '-', 'GT/AG', False],
['chr2', 3, 25, '-', 'CT/AC', False],
['chr2', 5, 20, '-', 'GT/AG', True],
['chr2', 5, 30, '-', 'GT/AG', False],
['chr2', 10, 20, '-', 'CT/AC', True],
['chr2', 30, 40, '-', 'CT/AC', False]],
index=ind,
columns=[u'chrom', u'start', u'end', u'strand',
u'intron_motif', u'annotated']
)
p2, a2 = cpb.star._make_sj_out_panel(d)
assert_frame_equal(a, a2)
| assert_panel_equal(p, p2) | pandas.util.testing.assert_panel_equal |
#%%
import os
import glob
import itertools
import numpy as np
import pandas as pd
import skbio
# Import this project's library
import fit_seq
# Import matplotlib stuff for plotting
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib as mpl
import matplotlib_venn as mpl_venn
# Seaborn, useful for graphics
import seaborn as sns
#%%
# Define dataset
DATASET = '20171114_fitseq_gfp'
# Define file pattern
PATTERN = '*day*_gfp.qcfilt.bin*.fastq.gz'
# Define time point position in file name
# For example for a file named:
# 20160519_MG1655_prel_time1_rel.qcfilt.bin2.fastq.gz
# the pattern 'time' is at position [3] when split by '_'
TIME_IDX = 3
# Define data directory
datadir = '../../../data/raw_sequencing/' +\
f'{DATASET}/'
# Define output dir
outputdir = '../../../data/raw_read_counts/'
# List all fastq.gz files
fastq_files = glob.glob(f'{datadir}{PATTERN}')
#%%
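# loop over the fastq files, extract each read's raw sequence and collect the sequences in a dataframe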
df_seq = pd.DataFrame()
for fastq in fastq_files:
# Extract time point number
time = fastq.split('/')[-1].split('_')[TIME_IDX]
print(f'time {time}: {fastq}')
# Use skbio to have a generator to iterate over fastq
seqs = skbio.io.read(fastq,
format='fastq',
verify=True)
# Initialize list to save sequence objects
seq_list = list()
# Iterate over sequences
# initialize counter
counter = 0
for seq in seqs:
if counter%100000 == 0:
print(f'count # {counter}')
# Extract sequence information
sequence = str(skbio.DNA(sequence=seq,
validate=False))
# Append to list
seq_list.append([sequence])
# Update counter
counter += 1
# Initialize dataframe to save sequences
names = ['sequence']
df = | pd.DataFrame.from_records(seq_list, columns=names) | pandas.DataFrame.from_records |
#
# This utility generates the bar chart of FLANNEL vs. Patched FLANNEL COVID-19 scores.
# Make sure results_home and the measure_detail* file names are adjusted accordingly before running this utility.
# results_home - line 12
# measure_detail* files in create_f1df function
#
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Update results home value
results_home = '/home/ubuntu/DeepLearningForHealthCareProject/results/'
base_learners = ["densenet161", "inception_v3", "resnet152", "resnext101_32x8d", "vgg19_bn", "ensemble"]
types = ["Covid-19", "Pneumonia Virus", "Pneumonia Bacteria", "Normal"]
# create cv index for each learner
# depending on the number of folds the index would be cv1, cv2, cv3, etc.
num_folds = 5
cv_index = []
for i in range(num_folds):
fold = i + 1
cv_index.append('cv' + str(fold))
# function to compute f1 score and macro-f1 score for each fold and each learner (for each file)
def compute_f1score(df):
df.set_index('Type', inplace=True)
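    # class-wise means of the first two rows (assumed to be the Precision and Recall rows of measure_detail) serve as the micro precision/recall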
micro_pr = df.iloc[0].mean()
micro_re = df.iloc[1].mean()
f1_df = pd.DataFrame(columns=types, index=['f1'])
for x_type in types:
prec = df[x_type]['Precision']
recl = df[x_type]['Recall']
f1_df[x_type]['f1'] = (2 * prec * recl) / (prec + recl)
f1_df['Macro_F1_Score'] = f1_df.mean(axis=1)
f1_df['Micro_F1_Score'] = (2 * micro_pr * micro_re) / (micro_pr + micro_re)
return f1_df
# Wrapper function to read P & R and call the F1 computation, creating a DF for a base learner
def create_f1df(learner, dir):
learner_f1 = pd.DataFrame(columns=types, index=cv_index)
macro_f1_list = []
micro_f1_list = []
for ind in cv_index:
result_home = results_home + dir + learner + '/' + ind
# print(result_home)
if learner == 'ensemble':
raw_df = pd.read_csv(result_home + '/measure_detail.csv')
else:
raw_df = pd.read_csv(result_home + '/measure_detail_' + learner + '_test_' + ind + '.csv')
f1_df = compute_f1score(raw_df)
# print(f1_df['Macro_F1_Score']['f1'])
for x_type in types:
learner_f1[x_type][ind] = f1_df[x_type]['f1']
macro_f1_list.append(f1_df['Macro_F1_Score']['f1'])
micro_f1_list.append(f1_df['Micro_F1_Score']['f1'])
learner_f1['Macro_F1_Score'] = macro_f1_list
learner_f1['Micro_F1_Score'] = micro_f1_list
return learner_f1
def addlabels(x,y,color='black'):
for i in range(len(x)):
plt.text(x[i], y[i], str(round(y[i], 4)), ha='right', va='bottom', fontsize='small', color=color)
def create_bar(scores, errors, micro_scores, micro_errors):
#x_pos = [i for i,_ in enumerate(base_learners)]
x_pos = [1, 4, 7, 10, 13, 16]
x_micro_pos = [2, 5, 8, 11, 14, 17]
colors = {'densenet161': 'papayawhip', 'inception_v3': 'blanchedalmond', 'vgg19_bn': 'bisque',
'resnext101_32x8d': 'moccasin', 'resnet152': 'navajowhite', 'ensemble': 'limegreen'}
colors_micro = {'densenet161': 'burlywood', 'inception_v3': 'tan', 'vgg19_bn': 'orange',
'resnext101_32x8d': 'orange', 'resnet152': 'goldenrod', 'ensemble': 'forestgreen'}
fig = plt.figure()
fig.add_axes([0.1, 0.1, 0.6, 0.75])
plt.bar(x_pos, scores, color=colors.values(), yerr=errors, width=0.6, linewidth=0.1, figure=fig)
plt.bar(x_micro_pos, micro_scores, color=colors_micro.values(), yerr=micro_errors, width=1.0, linewidth=0.1,
figure=fig)
addlabels(x_pos, scores)
addlabels(x_micro_pos, micro_scores)
plt.title("COVID-19 F1 score vs MICRO F1 Score", fontsize=10)
plt.ylabel("F1 Scores")
#plt.xlabel(base_learner)
labels = list(colors.keys())
handles = [plt.Rectangle((0, 0), 1, 1, color=colors[label]) for label in labels]
plt.legend(handles, labels, loc='upper right', bbox_to_anchor=(1.5, 1))
plt.xticks(x_pos, base_learners, rotation=23)
plt.show()
def create_comp_bars(prev_scores, prev_errors, curr_scores, curr_errors):
#x_pos = [i for i,_ in enumerate(base_learners)]
bar_width = 8.0
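    # each learner gets a pair of bars (original vs patched FLANNEL); 'gap' sets the spacing between consecutive pairs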
gap = (2 * bar_width) + 9.0
prev_ind = 1
curr_ind = prev_ind + bar_width + 0.8
x_prev_pos = [prev_ind]
x_curr_pos = [curr_ind]
for i in range(len(base_learners) - 1):
x_prev_pos.append(x_prev_pos[i] + gap)
x_curr_pos.append(x_curr_pos[i] + gap)
#x_prev_pos = [1, 4, 7, 10, 13, 16]
#x_curr_pos = [2, 5, 8, 11, 14, 17]
#print(x_prev_pos)
#print(x_curr_pos)
prev_colors = {'densenet161': 'papayawhip', 'inception_v3': 'blanchedalmond', 'vgg19_bn': 'bisque',
'resnext101_32x8d': 'moccasin', 'resnet152': 'navajowhite', 'ensemble': 'limegreen'}
curr_colors = {'densenet161': 'burlywood', 'inception_v3': 'tan', 'vgg19_bn': 'orange',
'resnext101_32x8d': 'orange', 'resnet152': 'goldenrod', 'ensemble': 'forestgreen'}
fig = plt.figure(figsize=(12,10), dpi=80)
fig.add_axes([0.1, 0.1, 0.6, 0.75])
plt.bar(x_prev_pos, prev_scores, color=prev_colors.values(), yerr=prev_errors, width=bar_width, linewidth=0.1, figure=fig)
plt.bar(x_curr_pos, curr_scores, color=curr_colors.values(), yerr=curr_errors, width=bar_width, linewidth=0.1, figure=fig)
addlabels(x_prev_pos, prev_scores, color='black')
addlabels(x_curr_pos, curr_scores, color='blue')
plt.title("Original FLANNEL vs Patched FLANNEL COVID-19 F1 scores", fontsize=15)
plt.ylabel("F1 Scores")
labels = list(prev_colors.keys())
labels_curr = list(curr_colors.keys())
handles = [plt.Rectangle((0, 0), 1, 1, color=prev_colors[label]) for label in labels]
handles_curr = [plt.Rectangle((0, 0), 1, 1, color=curr_colors[label]) for label in labels]
#plt.legend(handles, labels, loc='upper right', bbox_to_anchor=(1.5, 1))
#plt.legend(handles_curr, labels_curr, loc='upper right', bbox_to_anchor=(1.5, 1))
legend1 = plt.legend(handles, labels, loc='upper right', bbox_to_anchor=(1.5, 1), title="Original FLANNEL Models")
legend2 = plt.legend(handles_curr, labels_curr, loc='lower right', bbox_to_anchor=(1.5, 0.1), title="Patched FLANNEL Models")
fig.add_artist(legend1)
fig.add_artist(legend2)
plt.xticks(x_prev_pos, base_learners, rotation=23, fontsize=12)
plt.show()
def get_f1_scores(dir):
f1_scores = []
sd_scores = []
micro_f1_scores = []
micro_sd_scores = []
all_f1_scores = | pd.DataFrame(columns=types, index=base_learners) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pandas as pd
from datetime import datetime, timedelta
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import os
import matplotlib.ticker as tck
import matplotlib.font_manager as fm
import math as m
import matplotlib.dates as mdates
import netCDF4 as nc
from netCDF4 import Dataset
import itertools
import datetime
from matplotlib.font_manager import FontProperties
#------------------------------------------------------------------------------
# Code motivation --------------------------------------------------------------
'This version of the code derives the hourly and seasonal reflectance thresholds'
'for the selected pixels every 15 minutes, because it uses the 2018 GOES data set,'
'which is the most complete one and allows the thresholds to be obtained by season.'
'The old version of this code, which derived them every 10 minutes over the'
'experiment horizon, is kept in the Backups_VersionesAtiguas_Codigos folder in'
'case it needs to be consulted again.'
#-----------------------------------------------------------------------------
# Paths for the fonts ---------------------------------------------------------
prop = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Heavy.otf' )
prop_1 = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Book.otf')
prop_2 = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Black.otf')
## -------------------------------HOURS TO WORK WITH----------------------------- ##
HI = '06:00'; HF = '17:59'
#################################################################################################
## -----------------INCORPORATING THE RADIATION AND EXPERIMENT DATA----------------- ##
#################################################################################################
df_P975 = pd.read_table('/home/nacorreasa/Maestria/Datos_Tesis/Piranometro/60012018.txt', parse_dates=[2])
df_P350 = pd.read_table('/home/nacorreasa/Maestria/Datos_Tesis/Piranometro/60022018.txt', parse_dates=[2])
df_P348 = pd.read_table('/home/nacorreasa/Maestria/Datos_Tesis/Piranometro/60032018.txt', parse_dates=[2])
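# index each station by its timestamp, convert the UTC times to local Bogota time and drop the timezone info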
df_P975 = df_P975.set_index(["fecha_hora"])
df_P975.index = df_P975.index.tz_localize('UTC').tz_convert('America/Bogota')
df_P975.index = df_P975.index.tz_localize(None)
df_P350 = df_P350.set_index(["fecha_hora"])
df_P350.index = df_P350.index.tz_localize('UTC').tz_convert('America/Bogota')
df_P350.index = df_P350.index.tz_localize(None)
df_P348 = df_P348.set_index(["fecha_hora"])
df_P348.index = df_P348.index.tz_localize('UTC').tz_convert('America/Bogota')
df_P348.index = df_P348.index.tz_localize(None)
df_P975.index = pd.to_datetime(df_P975.index, format="%Y-%m-%d %H:%M:%S", errors='coerce')
df_P350.index = | pd.to_datetime(df_P350.index, format="%Y-%m-%d %H:%M:%S", errors='coerce') | pandas.to_datetime |
import rba
import copy
import pandas
import time
import numpy
import seaborn
import matplotlib.pyplot as plt
from .rba_Session import RBA_Session
from sklearn.linear_model import LinearRegression
# import matplotlib.pyplot as plt
def find_ribosomal_proteins(rba_session, model_processes=['TranslationC', 'TranslationM'], external_annotations=None):
out = []
for i in model_processes:
out += [rba_session.ModelStructure.ProteinInfo.Elements[j]['ProtoID']
for j in list(rba_session.ModelStructure.ProcessInfo.Elements[i]['Composition'].keys()) if j in rba_session.ModelStructure.ProteinInfo.Elements.keys()]
if external_annotations is not None:
out += list(external_annotations['ID'])
return(list(set(out)))
def build_model_compartment_map(rba_session):
out = {rba_session.ModelStructure.ProteinInfo.Elements[i]['ProtoID']: rba_session.ModelStructure.ProteinInfo.Elements[i]['Compartment'] for i in list(
rba_session.ModelStructure.ProteinInfo.Elements.keys())}
return(out)
def build_compartment_annotations(Compartment_Annotations_external, model_protein_compartment_map):
for i in Compartment_Annotations_external.index:
if Compartment_Annotations_external.loc[i, 'ID'] in list(model_protein_compartment_map.keys()):
Compartment_Annotations_external.loc[i, 'modelproteinannotation'] = 1
else:
Compartment_Annotations_external.loc[i, 'modelproteinannotation'] = 0
Compartment_Annotations_internal = pandas.DataFrame()
Compartment_Annotations_internal['ID'] = list(model_protein_compartment_map.keys())
Compartment_Annotations_internal['ModelComp'] = list(model_protein_compartment_map.values())
Compartment_Annotations = pandas.concat(
[Compartment_Annotations_internal, Compartment_Annotations_external.loc[Compartment_Annotations_external['modelproteinannotation'] == 0, ['ID', 'ModelComp']]], axis=0)
return(Compartment_Annotations)
def build_dataset_annotations(input, ID_column, Uniprot, Compartment_Annotations, model_protein_compartment_map, ribosomal_proteins):
print('riboprots-----------------')
print(ribosomal_proteins)
out = pandas.DataFrame()
for g in list(input[ID_column]):
out.loc[g, 'ID'] = g
matches = [i for i in list(Uniprot.loc[pandas.isna(
Uniprot['Gene names']) == False, 'Gene names']) if g in i]
mass_prot = numpy.nan
if len(matches) > 0:
mass_prot = len(Uniprot.loc[Uniprot['Gene names'] == matches[0], 'Sequence'].values[0])
out.loc[g, 'AA_residues'] = mass_prot
if g in list(Compartment_Annotations['ID']):
out.loc[g, 'Location'] = Compartment_Annotations.loc[Compartment_Annotations['ID']
== g, 'ModelComp'].values[0]
in_model = 0
if g in model_protein_compartment_map.keys():
in_model = 1
is_ribosomal = 0
if g in ribosomal_proteins:
is_ribosomal = 1
out.loc[g, 'InModel'] = in_model
out.loc[g, 'IsRibosomal'] = is_ribosomal
return(out)
def build_full_annotations_from_dataset_annotations(annotations_list):
out = pandas.concat(annotations_list, axis=0)
index = out.index
is_duplicate = index.duplicated(keep="first")
not_duplicate = ~is_duplicate
out = out[not_duplicate]
return(out)
def infer_copy_numbers_from_reference_copy_numbers(fold_changes, absolute_data, matching_column_in_fold_change_data, matching_column_in_absolute_data, conditions_in_fold_change_data_to_restore):
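    # the reference copy number divided by 2**(fold change in the matching condition) gives an absolute reference,
    # which is then multiplied by 2**(fold change) of every condition to restore absolute copy numbers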
out = pandas.DataFrame()
for i in list(absolute_data['Gene']):
if i in list(fold_changes['Gene']):
FoldChange_match = fold_changes.loc[fold_changes['Gene']
== i, matching_column_in_fold_change_data].values[0]
CopyNumber_match = absolute_data.loc[absolute_data['Gene']
== i, matching_column_in_absolute_data].values[0]
if not pandas.isna(FoldChange_match):
if not pandas.isna(CopyNumber_match):
out.loc[i, 'ID'] = i
out.loc[i, 'Absolute_Reference'] = CopyNumber_match/(2**FoldChange_match)
for gene in list(out['ID']):
Abs_Ref = out.loc[gene, 'Absolute_Reference']
for condition in conditions_in_fold_change_data_to_restore:
out.loc[gene, condition] = Abs_Ref * \
(2**fold_changes.loc[fold_changes['Gene'] == gene, condition].values[0])
return(out)
def add_annotations_to_proteome(input, ID_column, annotations):
for i in input.index:
if input.loc[i, ID_column] in annotations.index:
input.loc[i, 'AA_residues'] = annotations.loc[input.loc[i, ID_column], 'AA_residues']
input.loc[i, 'Location'] = annotations.loc[input.loc[i, ID_column], 'Location']
input.loc[i, 'InModel'] = annotations.loc[input.loc[i, ID_column], 'InModel']
input.loc[i, 'IsRibosomal'] = annotations.loc[input.loc[i, ID_column], 'IsRibosomal']
return(input)
def determine_compartment_occupation(Data, Condition, mass_col='AA_residues', only_in_model=False, compartments_to_ignore=['DEF'], compartments_no_original_PG=[], ribosomal_proteins_as_extra_compartment=True):
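    # sum copy numbers weighted by amino-acid residues per 'Location'; ribosomal proteins can be split out as their own pseudo-compartment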
for i in compartments_to_ignore:
Data = Data.loc[Data['Location'] != i]
for i in compartments_no_original_PG:
Data = Data.loc[(Data['Location'] != i) | (Data['InModel'] == 1)]
if only_in_model:
Data = Data.loc[Data['InModel'] >= 1]
if ribosomal_proteins_as_extra_compartment:
Data_R = Data.loc[Data['IsRibosomal'] == 1].copy()
Data = Data.loc[Data['IsRibosomal'] == 0]
Data_R_df = Data_R.loc[:, [Condition, mass_col, 'Location']]
Data_R_df[Condition] = Data_R_df[Condition]*Data_R_df[mass_col]
Ribosomal_sum = Data_R_df[Condition].sum()
df = Data.loc[:, [Condition, mass_col, 'Location']]
df[Condition] = df[Condition]*df[mass_col]
out = pandas.DataFrame(df.groupby('Location').sum())
if ribosomal_proteins_as_extra_compartment:
out.loc['Ribosomes', Condition] = Ribosomal_sum
out.loc['Total', Condition] = out[Condition].sum()
out.loc[:, 'original_protein_fraction'] = out[Condition]/out.loc['Total', Condition]
out.rename(columns={Condition: 'original_amino_acid_occupation'}, inplace=True)
out.drop(columns=['AA_residues'], inplace=True)
return(out)
def build_proteome_overview(input, condition, compartments_to_ignore=['DEF', 'DEFA', 'Def'], compartments_no_original_PG=['n', 'Secreted'], ribosomal_proteins_as_extra_compartment=True):
out = determine_compartment_occupation(Data=input, Condition=condition, compartments_to_ignore=compartments_to_ignore,
compartments_no_original_PG=compartments_no_original_PG, ribosomal_proteins_as_extra_compartment=ribosomal_proteins_as_extra_compartment, only_in_model=False)
out_in_model = determine_compartment_occupation(Data=input, Condition=condition, compartments_to_ignore=compartments_to_ignore,
compartments_no_original_PG=compartments_no_original_PG, ribosomal_proteins_as_extra_compartment=ribosomal_proteins_as_extra_compartment, only_in_model=True)
out['original_PG_fraction'] = 1-out_in_model['original_amino_acid_occupation'] / \
out['original_amino_acid_occupation']
return(out)
def determine_correction_factor_A(fractions_entirely_replaced_with_expected_value):
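    # factor A = 1 / (1 - sum of the expected fractions); used to rescale the measured fractions of the remaining compartments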
expected_fraction_sum = 0
for i in fractions_entirely_replaced_with_expected_value.keys():
expected_fraction_sum += fractions_entirely_replaced_with_expected_value[i]
factor = 1/(1-expected_fraction_sum)
return(factor)
def determine_correction_factor_B(imposed_compartment_fractions):
expected_fractions = 0
for i in imposed_compartment_fractions.keys():
expected_fractions += imposed_compartment_fractions[i]
factor = 1-expected_fractions
return(factor)
def determine_correction_factor_C(input, condition, reference_condition):
return(input.loc[input['ID'] == 'Total_protein', condition].values[0]/input.loc[input['ID'] == 'Total_protein', reference_condition].values[0])
def correct_protein_fractions(input, factors, directly_corrected_compartments, imposed_compartment_fractions):
out = input.copy()
for c in out.index:
if c in directly_corrected_compartments:
out.loc[c, 'new_protein_fraction'] = out.loc[c,
'original_protein_fraction']*factors['A']*factors['B']
elif c in imposed_compartment_fractions.keys():
out.loc[c, 'new_protein_fraction'] = imposed_compartment_fractions[c]
return(out)
def correct_PG_fraction(input, factors, compartments_no_original_PG, merged_compartments):
out = input.copy()
for c in out.index:
if c == 'Total':
continue
else:
if c in compartments_no_original_PG:
original_fraction = out.loc[c, 'original_protein_fraction']
out.loc[c, 'new_PG_fraction'] = 1 - ((factors['A']*factors['B']*original_fraction) /
out.loc[c, 'new_protein_fraction'])
elif c in merged_compartments.keys():
out.loc[c, 'new_PG_fraction'] = out.loc[c, 'original_PG_fraction']*out.loc[c, 'original_protein_fraction']/(
out.loc[c, 'original_protein_fraction']+out.loc[merged_compartments[c], 'original_protein_fraction'])
else:
out.loc[c, 'new_PG_fraction'] = out.loc[c, 'original_PG_fraction']
return(out)
def merge_compartments(input, merged_compartments):
out = input.copy()
for c in merged_compartments.keys():
out.loc[c, 'new_protein_fraction'] = out.loc[c, 'new_protein_fraction'] + \
out.loc[merged_compartments[c], 'new_protein_fraction']
return(out)
def calculate_new_total_PG_fraction(input):
out = input.copy()
fraction = 0
for c in out.index:
if c not in ['Total', 'Ribosomes']:
fraction += out.loc[c, 'new_protein_fraction']*out.loc[c, 'new_PG_fraction']
out.loc['Total', 'new_PG_fraction'] = fraction
out.loc['Total', 'new_protein_fraction'] = 1
return(out)
def determine_apparent_process_efficiencies(growth_rate, input, rba_session, proteome_summary, protein_data, condition, gene_id_col):
process_efficiencies = pandas.DataFrame()
for i in input.index:
process_ID = input.loc[i, 'Process_ID']
process_name = input.loc[i, 'Process_Name']
process_client_compartments = input.loc[i, 'Client_Compartments'].split(' , ')
constituting_proteins = {rba_session.ModelStructure.ProteinInfo.Elements[i]['ProtoID']: rba_session.ModelStructure.ProteinInfo.Elements[
i]['AAnumber'] for i in rba_session.ModelStructure.ProcessInfo.Elements[process_name]['Composition'].keys()}
Total_client_fraction = sum([proteome_summary.loc[i, 'new_protein_fraction']
for i in process_client_compartments])
n_AAs_in_machinery = 0
machinery_size = 0
for i in constituting_proteins.keys():
if i in protein_data['ID']:
protein_data.loc[protein_data['ID'] == i, ]
n_AAs_in_machinery += protein_data.loc[protein_data['ID'] == i, condition].values[0] * \
protein_data.loc[protein_data['ID'] == i, 'AA_residues'].values[0]
machinery_size += constituting_proteins[i]
            # right reference amount?
if n_AAs_in_machinery > 0:
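            # specific capacity = growth rate * client-compartment protein fraction / machinery protein fraction; the apparent capacity scales this by the machinery size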
relative_Protein_fraction_of_machinery = n_AAs_in_machinery / \
proteome_summary.loc['Total', 'original_amino_acid_occupation']
specific_capacity = growth_rate*Total_client_fraction/relative_Protein_fraction_of_machinery
apparent_capacity = specific_capacity*machinery_size
# process_ID[process_name] = apparent_capacity
process_efficiencies.loc[process_name, 'Process'] = process_ID
process_efficiencies.loc[process_name, 'Parameter'] = str(
process_ID+'_apparent_efficiency')
process_efficiencies.loc[process_name, 'Value'] = apparent_capacity
return(process_efficiencies)
def correction_pipeline(input, condition, compartments_to_ignore, compartments_no_original_PG, fractions_entirely_replaced_with_expected_value, imposed_compartment_fractions, directly_corrected_compartments, merged_compartments):
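    # build the proteome overview, derive correction factors A and B, rescale the protein and PG fractions, merge compartments and recompute the totals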
out = build_proteome_overview(input=input, condition=condition, compartments_to_ignore=compartments_to_ignore,
compartments_no_original_PG=compartments_no_original_PG, ribosomal_proteins_as_extra_compartment=True)
factor_A = determine_correction_factor_A(fractions_entirely_replaced_with_expected_value={
i: imposed_compartment_fractions[i] for i in fractions_entirely_replaced_with_expected_value})
factor_B = determine_correction_factor_B(
imposed_compartment_fractions=imposed_compartment_fractions)
out = correct_protein_fractions(input=out, factors={
'A': factor_A, 'B': factor_B}, directly_corrected_compartments=directly_corrected_compartments, imposed_compartment_fractions=imposed_compartment_fractions)
out = correct_PG_fraction(input=out, factors={
'A': factor_A, 'B': factor_B}, compartments_no_original_PG=compartments_no_original_PG, merged_compartments=merged_compartments)
out = merge_compartments(input=out, merged_compartments=merged_compartments)
out = calculate_new_total_PG_fraction(input=out)
out.to_csv(str('Correction_overview_'+condition+'.csv'))
return({'Summary': out, 'Correction_factors': {'A': factor_A, 'B': factor_B}})
def build_input_for_default_kapp_estimation(input):
out = pandas.DataFrame(columns=['Compartment_ID', 'Density', 'PG_fraction'])
for i in input['Summary'].index:
if i not in ['Total', 'Ribosomes']:
out.loc[i, 'Compartment_ID'] = i
out.loc[i, 'Density'] = input['Summary'].loc[i, 'new_protein_fraction']
out.loc[i, 'PG_fraction'] = input['Summary'].loc[i, 'new_PG_fraction']
return(out)
def flux_bounds_from_input(input, condition, specific_exchanges=None):
flux_mean_df = input.loc[input['Type'] == 'ExchangeFlux_Mean', :]
flux_mean_SE = input.loc[input['Type'] == 'ExchangeFlux_StandardError', :]
out = pandas.DataFrame(columns=['Reaction_ID', 'LB', 'UB'])
if specific_exchanges is None:
exchanges_to_set = list(flux_mean_df['ID'])
else:
exchanges_to_set = specific_exchanges
for rx in exchanges_to_set:
mean_val = flux_mean_df.loc[flux_mean_df['ID'] == rx, condition].values[0]
if not pandas.isna(mean_val):
SE_val = flux_mean_SE.loc[flux_mean_SE['ID'] == str(rx+'_SE'), condition].values[0]
out.loc[rx, 'Reaction_ID'] = rx
if not pandas.isna(SE_val):
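                # bounds are mean +/- standard error, clipped at zero so the interval never crosses the sign of the measured mean flux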
lb = mean_val-SE_val
ub = mean_val+SE_val
if mean_val < 0:
out.loc[rx, 'LB'] = lb
if ub > 0:
out.loc[rx, 'UB'] = 0
else:
out.loc[rx, 'UB'] = ub
elif mean_val > 0:
out.loc[rx, 'UB'] = ub
if lb < 0:
out.loc[rx, 'LB'] = 0
else:
out.loc[rx, 'LB'] = lb
else:
out.loc[rx, 'LB'] = lb
out.loc[rx, 'UB'] = ub
else:
out.loc[rx, 'LB'] = mean_val
out.loc[rx, 'UB'] = mean_val
flux_dir_df = input.loc[input['Type'] == 'Flux_Direction', :]
if specific_exchanges is None:
exchanges_to_set = list(flux_dir_df['ID'])
else:
exchanges_to_set = specific_exchanges
for rx in exchanges_to_set:
out.loc[rx, 'Reaction_ID'] = rx
if flux_dir_df.loc[flux_dir_df['ID'] == rx, condition].values[0] == 1:
out.loc[rx, 'LB'] = 0
elif flux_dir_df.loc[flux_dir_df['ID'] == rx, condition].values[0] == -1:
out.loc[rx, 'UB'] = 0
elif flux_dir_df.loc[flux_dir_df['ID'] == rx, condition].values[0] == 0:
out.loc[rx, 'LB'] = 0
out.loc[rx, 'UB'] = 0
flux_upper_df = input.loc[input['Type'] == 'Flux_Upper_Bound', :]
for rx in list(flux_upper_df['ID']):
out.loc[rx, 'Reaction_ID'] = rx
out.loc[rx, 'UB'] = flux_upper_df.loc[flux_upper_df['ID'] == rx, condition].values[0]
flux_lower_df = input.loc[input['Type'] == 'Flux_Lower_Bound', :]
for rx in list(flux_lower_df['ID']):
out.loc[rx, 'Reaction_ID'] = rx
out.loc[rx, 'LB'] = flux_lower_df.loc[flux_lower_df['ID'] == rx, condition].values[0]
return(out)
def growth_Rate_from_input(input, condition):
return(input.loc[input['Type'] == 'Growth_Rate', condition].values[0])
def proteome_fractions_from_input(input, condition):
df = input.loc[input['Type'] == 'Expected_ProteomeFraction', :]
return(dict(zip(list(df['ID']), list(df[condition]))))
def medium_concentrations_from_input(input, condition):
df = input.loc[input['Type'] == 'Medium_Concentration', :]
return(dict(zip(list(df['ID']), list(df[condition]))))
def build_input_proteome_for_specific_kapp_estimation(proteomics_data, condition):
out = pandas.DataFrame()
out['ID'] = proteomics_data['ID']
out['copy_number'] = proteomics_data[condition]
return(out)
def inject_estimated_efficiencies_into_model(rba_session, specific_kapps=None, default_kapps=None, process_efficiencies=None, round_to_digits=0):
"""
Parameters
----------
specific_kapps : pandas.DataFrame(columns=['Enzyme_ID','Kapp'])
default_kapps : {'default_kapp':value,'default_transporter_kapp':value}
process_efficiencies : pandas.DataFrame(columns=['Process','Parameter','Value'])
"""
if specific_kapps is not None:
parameterized = []
for enz in list(specific_kapps['Enzyme_ID']):
if not pandas.isna(specific_kapps.loc[specific_kapps['Enzyme_ID'] == enz, 'Kapp'].values[0]):
if enz not in parameterized:
all_enzs = rba_session.ModelStructure.EnzymeInfo.Elements[enz]['Isozymes']
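                    # parameterize the enzyme together with all of its isozymes so they share a single kapp constant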
all_enzs.append(enz)
parameterized += all_enzs
if len(all_enzs) == 1:
proto_enz = all_enzs[0]
else:
proto_enz = [i for i in all_enzs if not '_duplicate_' in i][0]
val = round(specific_kapps.loc[specific_kapps['Enzyme_ID']
== enz, 'Kapp'].values[0], round_to_digits)
const = rba.xml.parameters.Function(
str(proto_enz + '_kapp__constant'), 'constant', parameters={'CONSTANT': val}, variable=None)
if str(proto_enz + '_kapp__constant') not in rba_session.model.parameters.functions._elements_by_id.keys():
rba_session.model.parameters.functions.append(const)
else:
rba_session.model.parameters.functions._elements_by_id[const.id] = const
count = 0
for e in rba_session.model.enzymes.enzymes:
if e.id in all_enzs:
count += 1
e.forward_efficiency = str(proto_enz + '_kapp__constant')
e.backward_efficiency = str(proto_enz + '_kapp__constant')
if count == len(all_enzs):
break
if default_kapps is not None:
if type(default_kapps) is dict:
rba_session.model.parameters.functions._elements_by_id[
'default_efficiency'].parameters._elements_by_id['CONSTANT'].value = default_kapps['default_kapp']
rba_session.model.parameters.functions._elements_by_id['default_transporter_efficiency'].parameters._elements_by_id[
'CONSTANT'].value = default_kapps['default_transporter_kapp']
if process_efficiencies is not None:
for i in process_efficiencies.index:
if process_efficiencies.loc[i, 'Process'] in rba_session.model.processes.processes._elements_by_id.keys():
if not pandas.isna(process_efficiencies.loc[i, 'Value']):
rba_session.model.processes.processes._elements_by_id[process_efficiencies.loc[i,
'Process']].machinery.capacity.value = process_efficiencies.loc[i, 'Parameter']
const = rba.xml.parameters.Function(process_efficiencies.loc[i, 'Parameter'], 'constant', parameters={
'CONSTANT': process_efficiencies.loc[i, 'Value']}, variable=None)
if process_efficiencies.loc[i, 'Parameter'] not in rba_session.model.parameters.functions._elements_by_id.keys():
rba_session.model.parameters.functions.append(const)
else:
rba_session.model.parameters.functions._elements_by_id[const.id] = const
rba_session.rebuild_from_model()
def calibration_workflow(proteome,
condition,
reference_condition,
gene_ID_column,
definition_file,
rba_session,
process_efficiency_estimation_input=None,
default_kapps_provided=None):
t0 = time.time()
correction_results = correction_pipeline(input=proteome,
condition=condition,
compartments_to_ignore=['DEF', 'DEFA', 'Def'],
compartments_no_original_PG=['n', 'Secreted'],
fractions_entirely_replaced_with_expected_value=[
'Ribosomes'],
imposed_compartment_fractions=proteome_fractions_from_input(
input=definition_file, condition=condition),
directly_corrected_compartments=[
'c', 'cM', 'erM', 'gM', 'm', 'mIM', 'mIMS', 'mOM', 'vM', 'x'],
merged_compartments={'c': 'Ribosomes'})
# mumax0 = rba_session.findMaxGrowthRate()
rba_session.setMedium(medium_concentrations_from_input(
input=definition_file, condition=condition))
# mumax1 = rba_session.findMaxGrowthRate()
if process_efficiency_estimation_input is not None:
process_efficiencies = determine_apparent_process_efficiencies(growth_rate=growth_Rate_from_input(
input=definition_file, condition=condition), input=process_efficiency_estimation_input, rba_session=rba_session, protein_data=proteome, proteome_summary=correction_results['Summary'], condition=condition, gene_id_col=gene_ID_column)
inject_estimated_efficiencies_into_model(
rba_session, specific_kapps=None, default_kapps=None, process_efficiencies=process_efficiencies)
else:
process_efficiencies = None
protein_scaling_coefficient = 1000 * determine_correction_factor_C(input=definition_file, condition=condition, reference_condition=reference_condition) * \
correction_results['Correction_factors']['A'] * \
correction_results['Correction_factors']['B']/6.022e23
# protein_scaling_coefficient = 1000 * correction_results['Correction_factors']['A'] * correction_results['Correction_factors']['B']/6.022e23
proteome[condition] *= protein_scaling_coefficient
Specific_Kapps = rba_session.estimate_specific_Kapps(proteomicsData=build_input_proteome_for_specific_kapp_estimation(proteome, condition),
flux_bounds=flux_bounds_from_input(
input=definition_file, condition=condition, specific_exchanges=None),
mu=growth_Rate_from_input(
input=definition_file, condition=condition),
biomass_function=None,
target_biomass_function=True)
# Specific_Kapps.loc[(Specific_Kapps['Kapp'] <= 1000000) &
# (Specific_Kapps['Kapp'] >= 1), 'Kapp'].hist()
# plt.show()
# mumax2 = rba_session.findMaxGrowthRate()
if default_kapps_provided is None:
Default_Kapps = rba_session.estimate_default_Kapps(target_mu=growth_Rate_from_input(input=definition_file, condition=condition), compartment_densities_and_PGs=build_input_for_default_kapp_estimation(
correction_results), flux_bounds=flux_bounds_from_input(input=definition_file, condition=condition, specific_exchanges=None), mu_approximation_precision=0.01)
inject_estimated_efficiencies_into_model(rba_session, specific_kapps=None, default_kapps={
'default_kapp': Default_Kapps.iloc[-1, 2], 'default_transporter_kapp': Default_Kapps.iloc[-1, 3]}, process_efficiencies=None)
else:
inject_estimated_efficiencies_into_model(
rba_session, specific_kapps=None, default_kapps=default_kapps_provided, process_efficiencies=None)
Default_Kapps = default_kapps_provided
inject_estimated_efficiencies_into_model(
rba_session, specific_kapps=Specific_Kapps, default_kapps=None, process_efficiencies=None)
# mumax3 = rba_session.findMaxGrowthRate()
compartment_densities_and_PGs = build_input_for_default_kapp_estimation(correction_results)
for comp in list(compartment_densities_and_PGs['Compartment_ID']):
rba_session.model.parameters.functions._elements_by_id[str(
'fraction_protein_'+comp)].parameters._elements_by_id['CONSTANT'].value = compartment_densities_and_PGs.loc[compartment_densities_and_PGs['Compartment_ID'] == comp, 'Density']
rba_session.model.parameters.functions._elements_by_id[str(
'fraction_non_enzymatic_protein_'+comp)].parameters._elements_by_id['CONSTANT'].value = compartment_densities_and_PGs.loc[compartment_densities_and_PGs['Compartment_ID'] == comp, 'PG_fraction']
rba_session.rebuild_from_model()
rba_session.addExchangeReactions()
rba_session.setMedium(medium_concentrations_from_input(
input=definition_file, condition=condition))
# FBs = flux_bounds_from_input(
# input=definition_file, condition=condition, specific_exchanges=None)
#rba_session.Problem.setLB(dict(zip(list(FBs['Reaction_ID']), list(FBs['LB']))))
# rba_session.Problem.setLB({FBs.loc[i, 'Reaction_ID']: FBs.loc[i, 'LB']
# for i in FBs.index if not pandas.isna(FBs.loc[i, 'LB'])})
# rba_session.Problem.setLB({FBs.loc[i, 'Reaction_ID']: FBs.loc[i, 'UB']
# for i in FBs.index if not pandas.isna(FBs.loc[i, 'UB'])})
#rba_session.Problem.setUB(dict(zip(list(FBs['Reaction_ID']), list(FBs['UB']))))
rba_session.Problem.setLB({'R_EX_cys__L_e': 0, 'R_EX_met__L_e': 0})
rba_session.Problem.setUB({'R_EX_cys__L_e': 0, 'R_EX_met__L_e': 0})
mumax4 = rba_session.findMaxGrowthRate()
rba_session.recordResults('Prokaryotic')
prok_results = copy.deepcopy(rba_session.Results)
rba_session2 = copy.copy(rba_session)
rba_session2.eukaryoticDensities4(CompartmentRelationships=False)
mumax5 = rba_session2.findMaxGrowthRate()
rba_session2.recordResults('Eukaryotic')
# print([Default_Kapps.iloc[-1, 2], Default_Kapps.iloc[-1, 3]])
# print([growth_Rate_from_input(input=definition_file,
# condition=condition), mumax0, mumax1, mumax2, mumax3, mumax4, mumax5])
print(time.time() - t0)
return({'Simulation_Results': prok_results, 'Simulation_Results_Euk': copy.deepcopy(rba_session2.Results), 'Proteome': build_input_proteome_for_specific_kapp_estimation(proteome, condition), 'Correction_Results': correction_results, 'Default_Kapps': Default_Kapps, 'Specific_Kapps': Specific_Kapps, 'Process_Efficiencies': process_efficiencies})
# seaborn.violinplot(x=Specific_Kapps.loc[Specific_Kapps['Kapp'] <= 400000, 'Kapp'])
# Specific_Kapps.loc[(Specific_Kapps['Kapp'] <= 1000000) &
# (Specific_Kapps['Kapp'] >= 1), 'Kapp']).hist()
# plt.show()
# Test predictions
# Given medium predict Mu, Exchanges and Proteome
# Prokaryotic
# Eukaryotic
# 1. import model and uniprot-file and compartment-annotation
## external_annotations for ribosomal-proteins!!! ##
## process-efficiency estimation input ##
## parse input-data properly and add Lahtvee information ##
print('---------------------START----------------------')
Input_Data = pandas.read_csv(
'DataSetsYeastRBACalibration/Calibration_InputDefinition.csv', sep=';', decimal=',', index_col=0)
Process_Efficiency_Estimation_Input = pandas.read_csv(
'DataSetsYeastRBACalibration/Process_Efficiency_Estimation_Input.csv', sep=';', decimal=',')
Simulation = RBA_Session('Yeast_iMM904_RBA_model')
Uniprot = pandas.read_csv('Yeast_iMM904_RBA_model/uniprot.csv', sep='\t')
Compartment_Annotations_external = pandas.read_csv(
'DataSetsYeastRBACalibration/Manually_curated_Protein_Locations_for_Calibration.csv', index_col=None, sep=';')
Ribosomal_Proteins_Uniprot = pandas.read_csv(
'DataSetsYeastRBACalibration/uniprot_ribosomal_proteins.csv', index_col=None, sep=';')
Hackett_Clim_FCs = pandas.read_csv('DataSetsYeastRBACalibration/Hacket_Clim_ProteinFCs.csv')
Lahtvee_REF = pandas.read_csv('DataSetsYeastRBACalibration/LahtveeRefProteomicsData.csv')
picogram_togram_coefficient = 1e12
Lahtvee_REF['Lahtvee_REF'] *= picogram_togram_coefficient
Lahtvee_REF = Lahtvee_REF.loc[pandas.isna(Lahtvee_REF['Lahtvee_REF']) == False]
ribosomal_proteins = find_ribosomal_proteins(rba_session=Simulation, model_processes=[
'TranslationC', 'TranslationM'], external_annotations=Ribosomal_Proteins_Uniprot)
model_protein_compartment_map = build_model_compartment_map(rba_session=Simulation)
Compartment_Annotations = build_compartment_annotations(
Compartment_Annotations_external=Compartment_Annotations_external, model_protein_compartment_map=model_protein_compartment_map)
print('Annotations to data')
annotations_Lahtvee = build_dataset_annotations(input=Lahtvee_REF, ID_column='Gene', Uniprot=Uniprot,
Compartment_Annotations=Compartment_Annotations, model_protein_compartment_map=model_protein_compartment_map, ribosomal_proteins=ribosomal_proteins)
annotations_Hackett = build_dataset_annotations(input=Hackett_Clim_FCs, ID_column='Gene', Uniprot=Uniprot,
Compartment_Annotations=Compartment_Annotations, model_protein_compartment_map=model_protein_compartment_map, ribosomal_proteins=ribosomal_proteins)
full_annotations = build_full_annotations_from_dataset_annotations(
annotations_list=[annotations_Lahtvee, annotations_Hackett])
####### Bootstrapping-loop starts here #######
restored_Hackett_Data = infer_copy_numbers_from_reference_copy_numbers(fold_changes=Hackett_Clim_FCs, absolute_data=Lahtvee_REF, matching_column_in_fold_change_data='Hackett_C01',
matching_column_in_absolute_data='Lahtvee_REF', conditions_in_fold_change_data_to_restore=['Hackett_C005', 'Hackett_C01', 'Hackett_C016', 'Hackett_C022', 'Hackett_C03'])
restored_Hackett_Data = add_annotations_to_proteome(
input=restored_Hackett_Data, ID_column='ID', annotations=full_annotations)
Lahtvee_REF = add_annotations_to_proteome(
input=Lahtvee_REF, ID_column='Gene', annotations=full_annotations)
# default_kapps_provided={'default_kapp':39673 , 'default_transporter_kapp':396730 }
# default_kapps_provided={'default_kapp':85449 , 'default_transporter_kapp':854490 }
# default_kapps_provided={'default_kapp':128174 , 'default_transporter_kapp':1281740 }
# default_kapps_provided={'default_kapp':280762 , 'default_transporter_kapp':2807620 }
# default_kapps_provided = {'default_kapp': 268555, 'default_transporter_kapp': 2685550}
Simulation = RBA_Session('Yeast_iMM904_RBA_model')
Calibration_Hackett_C005 = calibration_workflow(proteome=restored_Hackett_Data, condition='Hackett_C005', reference_condition='Lahtvee_REF', gene_ID_column='Gene',
definition_file=Input_Data, rba_session=Simulation, process_efficiency_estimation_input=Process_Efficiency_Estimation_Input, default_kapps_provided={'default_kapp': 39673, 'default_transporter_kapp': 396730})
print('0.05')
print('')
print('')
print('')
print('')
Simulation = RBA_Session('Yeast_iMM904_RBA_model')
Calibration_Hackett_C01 = calibration_workflow(proteome=restored_Hackett_Data, condition='Hackett_C01', reference_condition='Lahtvee_REF', gene_ID_column='Gene',
definition_file=Input_Data, rba_session=Simulation, process_efficiency_estimation_input=Process_Efficiency_Estimation_Input, default_kapps_provided={'default_kapp': 85449, 'default_transporter_kapp': 854490})
print('0.1')
print('')
print('')
print('')
print('')
Simulation = RBA_Session('Yeast_iMM904_RBA_model')
Calibration_Hackett_C016 = calibration_workflow(proteome=restored_Hackett_Data, condition='Hackett_C016', reference_condition='Lahtvee_REF', gene_ID_column='Gene',
definition_file=Input_Data, rba_session=Simulation, process_efficiency_estimation_input=Process_Efficiency_Estimation_Input, default_kapps_provided={'default_kapp': 128174, 'default_transporter_kapp': 1281740})
print('0.16')
print('')
print('')
print('')
print('')
Simulation = RBA_Session('Yeast_iMM904_RBA_model')
Calibration_Hackett_C022 = calibration_workflow(proteome=restored_Hackett_Data, condition='Hackett_C022', reference_condition='Lahtvee_REF', gene_ID_column='Gene',
definition_file=Input_Data, rba_session=Simulation, process_efficiency_estimation_input=Process_Efficiency_Estimation_Input, default_kapps_provided={'default_kapp': 280762, 'default_transporter_kapp': 2807620})
print('0.22')
print('')
print('')
print('')
print('')
Simulation = RBA_Session('Yeast_iMM904_RBA_model')
Calibration_Hackett_C03 = calibration_workflow(proteome=restored_Hackett_Data, condition='Hackett_C03', reference_condition='Lahtvee_REF', gene_ID_column='Gene',
definition_file=Input_Data, rba_session=Simulation, process_efficiency_estimation_input=Process_Efficiency_Estimation_Input, default_kapps_provided={'default_kapp': 280762, 'default_transporter_kapp': 2807620})
print('0.3')
specKapps_005 = pandas.DataFrame(index=list(
Calibration_Hackett_C005['Specific_Kapps']['Enzyme_ID']))
specKapps_005['Hackett_C005'] = list(Calibration_Hackett_C005['Specific_Kapps']['Kapp'])
specKapps_01 = pandas.DataFrame(index=list(Calibration_Hackett_C01['Specific_Kapps']['Enzyme_ID']))
specKapps_01['Hackett_C01'] = list(Calibration_Hackett_C01['Specific_Kapps']['Kapp'])
specKapps_016 = pandas.DataFrame(index=list(
Calibration_Hackett_C016['Specific_Kapps']['Enzyme_ID']))
specKapps_016['Hackett_C016'] = list(Calibration_Hackett_C016['Specific_Kapps']['Kapp'])
specKapps_022 = pandas.DataFrame(index=list(
Calibration_Hackett_C022['Specific_Kapps']['Enzyme_ID']))
specKapps_022['Hackett_C022'] = list(Calibration_Hackett_C022['Specific_Kapps']['Kapp'])
specKapps_03 = pandas.DataFrame(index=list(Calibration_Hackett_C03['Specific_Kapps']['Enzyme_ID']))
specKapps_03['Hackett_C03'] = list(Calibration_Hackett_C03['Specific_Kapps']['Kapp'])
all_spec_Kapps = pandas.concat(
[specKapps_005, specKapps_01, specKapps_016, specKapps_022, specKapps_03], axis=1)
all_spec_Kapps['ID'] = all_spec_Kapps.index
all_spec_Kapps.to_csv('Specific_Kapps_out.csv', sep=';', decimal=',')
process_efficiencies_005 = pandas.DataFrame(index=list(
Calibration_Hackett_C005['Process_Efficiencies']['Process']))
process_efficiencies_005['Hackett_C005'] = list(
Calibration_Hackett_C005['Process_Efficiencies']['Value'])
process_efficiencies_01 = pandas.DataFrame(index=list(
Calibration_Hackett_C01['Process_Efficiencies']['Process']))
process_efficiencies_01['Hackett_C01'] = list(
Calibration_Hackett_C01['Process_Efficiencies']['Value'])
process_efficiencies_016 = pandas.DataFrame(index=list(
Calibration_Hackett_C016['Process_Efficiencies']['Process']))
process_efficiencies_016['Hackett_C016'] = list(
Calibration_Hackett_C016['Process_Efficiencies']['Value'])
process_efficiencies_022 = pandas.DataFrame(index=list(
Calibration_Hackett_C022['Process_Efficiencies']['Process']))
process_efficiencies_022['Hackett_C022'] = list(
Calibration_Hackett_C022['Process_Efficiencies']['Value'])
process_efficiencies_03 = pandas.DataFrame(index=list(
Calibration_Hackett_C03['Process_Efficiencies']['Process']))
process_efficiencies_03['Hackett_C03'] = list(
Calibration_Hackett_C03['Process_Efficiencies']['Value'])
all_process_efficiencies = pandas.concat(
[process_efficiencies_005, process_efficiencies_01, process_efficiencies_016, process_efficiencies_022, process_efficiencies_03], axis=1)
all_process_efficiencies['ID'] = all_process_efficiencies.index
all_process_efficiencies.to_csv('Process_efficiencies_out.csv', sep=';', decimal=',')
########
########
Mus_o2 = [0.025, 0.05, 0.1, 0.15, 0.2, 0.25, 0.28, 0.3, 0.35, 0.4]
O2_J = [0.8, 1.3, 2.5, 3.9, 5.3, 7, 7.4, 6.1, 5.1, 3.7]
Glc_J = [0.3, 0.6, 1.1, 1.7, 2.3, 2.8, 3.4, 4.5, 8.6, 11.1]
CO2_J = [0.8, 1.4, 2.7, 4.2, 5.7, 7.5, 8, 8.8, 14.9, 18.9]
EtOH_J = [0, 0, 0, 0, 0, 0, 0.11, 2.3, 9.5, 13.9]
Ac_J = [0, 0, 0, 0, 0, 0, 0.08, 0.41, 0.62, 0.6]
Glyc_J = [0, 0, 0, 0, 0, 0, 0, 0, 0.05, 0.15]
## Hackett#
Mu_Hackett = [0.0498630244, 0.1054314572, 0.154377453333333, 0.2126503108, 0.293841410333333]
Glc_Hackett = [0.7367, 1.5462, 2.1722, 5.1571, 9.5962]
EtOH_Hackett = [0.0127, 0.0529, 0.1084, 4.6066, 14.0672]
Ac_Hackett = [0.0017, 0.0031, 0.0052, 0.4433, 0.8851]
Glyc_Hackett = [0.0035, 0.0077, 0.0065, 0.0579, 0.1699]
conditions = ['Hackett_C005', 'Hackett_C01', 'Hackett_C016', 'Hackett_C022', 'Hackett_C03']
Mus_predicted = [Calibration_Hackett_C005['Simulation_Results']['Mu'].loc['Mu', 'Prokaryotic'],
Calibration_Hackett_C01['Simulation_Results']['Mu'].loc['Mu', 'Prokaryotic'],
Calibration_Hackett_C016['Simulation_Results']['Mu'].loc['Mu', 'Prokaryotic'],
Calibration_Hackett_C022['Simulation_Results']['Mu'].loc['Mu', 'Prokaryotic'],
Calibration_Hackett_C03['Simulation_Results']['Mu'].loc['Mu', 'Prokaryotic']]
Mus_predicted_euk = [Calibration_Hackett_C005['Simulation_Results_Euk']['Mu'].loc['Mu', 'Eukaryotic'],
Calibration_Hackett_C01['Simulation_Results_Euk']['Mu'].loc['Mu', 'Eukaryotic'],
Calibration_Hackett_C016['Simulation_Results_Euk']['Mu'].loc['Mu', 'Eukaryotic'],
Calibration_Hackett_C022['Simulation_Results_Euk']['Mu'].loc['Mu', 'Eukaryotic'],
Calibration_Hackett_C03['Simulation_Results_Euk']['Mu'].loc['Mu', 'Eukaryotic']]
Glc_Exchange_predicted = [abs(Calibration_Hackett_C005['Simulation_Results']['ExchangeFluxes'].loc['M_glc__D', 'Prokaryotic']),
abs(Calibration_Hackett_C01['Simulation_Results']
['ExchangeFluxes'].loc['M_glc__D', 'Prokaryotic']),
abs(Calibration_Hackett_C016['Simulation_Results']
['ExchangeFluxes'].loc['M_glc__D', 'Prokaryotic']),
abs(Calibration_Hackett_C022['Simulation_Results']
['ExchangeFluxes'].loc['M_glc__D', 'Prokaryotic']),
abs(Calibration_Hackett_C03['Simulation_Results']['ExchangeFluxes'].loc['M_glc__D', 'Prokaryotic'])]
EtOH_Exchange_predicted = [abs(Calibration_Hackett_C005['Simulation_Results']['ExchangeFluxes'].loc['M_etoh', 'Prokaryotic']),
abs(Calibration_Hackett_C01['Simulation_Results']
['ExchangeFluxes'].loc['M_etoh', 'Prokaryotic']),
abs(Calibration_Hackett_C016['Simulation_Results']
['ExchangeFluxes'].loc['M_etoh', 'Prokaryotic']),
abs(Calibration_Hackett_C022['Simulation_Results']
['ExchangeFluxes'].loc['M_etoh', 'Prokaryotic']),
abs(Calibration_Hackett_C03['Simulation_Results']['ExchangeFluxes'].loc['M_etoh', 'Prokaryotic'])]
Ac_Exchange_predicted = [abs(Calibration_Hackett_C005['Simulation_Results']['ExchangeFluxes'].loc['M_ac', 'Prokaryotic']),
abs(Calibration_Hackett_C01['Simulation_Results']
['ExchangeFluxes'].loc['M_ac', 'Prokaryotic']),
abs(Calibration_Hackett_C016['Simulation_Results']
['ExchangeFluxes'].loc['M_ac', 'Prokaryotic']),
abs(Calibration_Hackett_C022['Simulation_Results']
['ExchangeFluxes'].loc['M_ac', 'Prokaryotic']),
abs(Calibration_Hackett_C03['Simulation_Results']['ExchangeFluxes'].loc['M_ac', 'Prokaryotic'])]
O2_Exchange_predicted = [abs(Calibration_Hackett_C005['Simulation_Results']['ExchangeFluxes'].loc['M_o2', 'Prokaryotic']),
abs(Calibration_Hackett_C01['Simulation_Results']
['ExchangeFluxes'].loc['M_o2', 'Prokaryotic']),
abs(Calibration_Hackett_C016['Simulation_Results']
['ExchangeFluxes'].loc['M_o2', 'Prokaryotic']),
abs(Calibration_Hackett_C022['Simulation_Results']
['ExchangeFluxes'].loc['M_o2', 'Prokaryotic']),
abs(Calibration_Hackett_C03['Simulation_Results']['ExchangeFluxes'].loc['M_o2', 'Prokaryotic'])]
Glycerol_Exchange_predicted = [abs(Calibration_Hackett_C005['Simulation_Results']['ExchangeFluxes'].loc['M_glyc', 'Prokaryotic']),
abs(Calibration_Hackett_C01['Simulation_Results']
['ExchangeFluxes'].loc['M_glyc', 'Prokaryotic']),
abs(Calibration_Hackett_C016['Simulation_Results']
['ExchangeFluxes'].loc['M_glyc', 'Prokaryotic']),
abs(Calibration_Hackett_C022['Simulation_Results']
['ExchangeFluxes'].loc['M_glyc', 'Prokaryotic']),
abs(Calibration_Hackett_C03['Simulation_Results']['ExchangeFluxes'].loc['M_glyc', 'Prokaryotic'])]
###
Glc_Exchange_predicted_euk = [abs(Calibration_Hackett_C005['Simulation_Results_Euk']['ExchangeFluxes'].loc['M_glc__D', 'Eukaryotic']),
abs(Calibration_Hackett_C01['Simulation_Results_Euk']
['ExchangeFluxes'].loc['M_glc__D', 'Eukaryotic']),
abs(Calibration_Hackett_C016['Simulation_Results_Euk']
['ExchangeFluxes'].loc['M_glc__D', 'Eukaryotic']),
abs(Calibration_Hackett_C022['Simulation_Results_Euk']
['ExchangeFluxes'].loc['M_glc__D', 'Eukaryotic']),
abs(Calibration_Hackett_C03['Simulation_Results_Euk']['ExchangeFluxes'].loc['M_glc__D', 'Eukaryotic'])]
EtOH_Exchange_predicted_euk = [abs(Calibration_Hackett_C005['Simulation_Results_Euk']['ExchangeFluxes'].loc['M_etoh', 'Eukaryotic']),
abs(Calibration_Hackett_C01['Simulation_Results_Euk']
['ExchangeFluxes'].loc['M_etoh', 'Eukaryotic']),
abs(Calibration_Hackett_C016['Simulation_Results_Euk']
['ExchangeFluxes'].loc['M_etoh', 'Eukaryotic']),
abs(Calibration_Hackett_C022['Simulation_Results_Euk']
['ExchangeFluxes'].loc['M_etoh', 'Eukaryotic']),
abs(Calibration_Hackett_C03['Simulation_Results_Euk']['ExchangeFluxes'].loc['M_etoh', 'Eukaryotic'])]
Ac_Exchange_predicted_euk = [abs(Calibration_Hackett_C005['Simulation_Results_Euk']['ExchangeFluxes'].loc['M_ac', 'Eukaryotic']),
abs(Calibration_Hackett_C01['Simulation_Results_Euk']
['ExchangeFluxes'].loc['M_ac', 'Eukaryotic']),
abs(Calibration_Hackett_C016['Simulation_Results_Euk']
['ExchangeFluxes'].loc['M_ac', 'Eukaryotic']),
abs(Calibration_Hackett_C022['Simulation_Results_Euk']
['ExchangeFluxes'].loc['M_ac', 'Eukaryotic']),
abs(Calibration_Hackett_C03['Simulation_Results_Euk']['ExchangeFluxes'].loc['M_ac', 'Eukaryotic'])]
O2_Exchange_predicted_euk = [abs(Calibration_Hackett_C005['Simulation_Results_Euk']['ExchangeFluxes'].loc['M_o2', 'Eukaryotic']),
abs(Calibration_Hackett_C01['Simulation_Results_Euk']
['ExchangeFluxes'].loc['M_o2', 'Eukaryotic']),
abs(Calibration_Hackett_C016['Simulation_Results_Euk']
['ExchangeFluxes'].loc['M_o2', 'Eukaryotic']),
abs(Calibration_Hackett_C022['Simulation_Results_Euk']
['ExchangeFluxes'].loc['M_o2', 'Eukaryotic']),
abs(Calibration_Hackett_C03['Simulation_Results_Euk']['ExchangeFluxes'].loc['M_o2', 'Eukaryotic'])]
Glycerol_Exchange_predicted_euk = [abs(Calibration_Hackett_C005['Simulation_Results_Euk']['ExchangeFluxes'].loc['M_glyc', 'Eukaryotic']),
abs(Calibration_Hackett_C01['Simulation_Results_Euk']
['ExchangeFluxes'].loc['M_glyc', 'Eukaryotic']),
abs(Calibration_Hackett_C016['Simulation_Results_Euk']
['ExchangeFluxes'].loc['M_glyc', 'Eukaryotic']),
abs(Calibration_Hackett_C022['Simulation_Results_Euk']
['ExchangeFluxes'].loc['M_glyc', 'Eukaryotic']),
abs(Calibration_Hackett_C03['Simulation_Results_Euk']['ExchangeFluxes'].loc['M_glyc', 'Eukaryotic'])]
###
fig, axs = plt.subplots(2, 3, figsize=(28, 7), sharex=True)
# plt.figure()
axs[0, 0].plot(Mu_Hackett, Mu_Hackett, color='lightgreen')
axs[0, 0].scatter(Mu_Hackett, Mus_predicted, color='black')
axs[0, 0].scatter(Mu_Hackett, Mus_predicted_euk, color='red')
axs[0, 0].legend(['Hackett', 'Prok.', 'Euk.'])
axs[0, 0].set_title('Predicted vs measured growth-rate')
axs[0, 0].set_ylabel('$\mu$ [$h^{-1}$]')
axs[0, 0].set_xlabel('$\mu$ [$h^{-1}$]')
# plt.show()
# plt.savefig(pp, format='pdf')
# plt.figure()
axs[0, 1].plot(Mus_o2, Glc_J, color='lightblue')
axs[0, 1].plot(Mu_Hackett, Glc_Hackett, color='lightgreen')
axs[0, 1].scatter(Mus_predicted, Glc_Exchange_predicted, color='black', alpha=0.8)
axs[0, 1].scatter(Mus_predicted, Glc_Exchange_predicted_euk, color='red', alpha=0.8)
# plt.scatter(Mus_Euk,[abs(i) for i in FluxFile_Euk.loc['M_glc__D',conditions].values.tolist()],color='red',alpha=0.8)
# plt.scatter(Mus_testcase,[abs(i) for i in FluxFile_testcase.loc['M_glc__D',conditions].values.tolist()],color='orange',alpha=0.8)
# Effect of Specific Growth Rate on Fermentative Capacity of Baker’s Yeast#
axs[0, 1].legend(['van Hoek', 'Hackett', 'Prok.', 'Euk.'])
axs[0, 1].set_title('Glucose-uptake rate')
axs[0, 1].set_xlabel('$\mu$ [$h^{-1}$]')
axs[0, 1].set_ylabel('$J^{Ex}$ [$mmol * g^{-1}_{DW} * h^{-1}$]')
# plt.show()
# plt.savefig(pp, format='pdf')
# plt.figure()
axs[0, 2].plot(Mus_o2, O2_J, color='lightblue')
# plt.plot(Mu_Hackett,Glc_Hackett,color='lightgreen')
axs[0, 2].scatter(Mus_predicted, O2_Exchange_predicted, color='black', alpha=0.8)
axs[0, 2].scatter(Mus_predicted, O2_Exchange_predicted_euk, color='red', alpha=0.8)
# plt.scatter(Mus_Euk,[abs(i) for i in FluxFile_Euk.loc['M_glc__D',conditions].values.tolist()],color='red',alpha=0.8)
# plt.scatter(Mus_testcase,[abs(i) for i in FluxFile_testcase.loc['M_glc__D',conditions].values.tolist()],color='orange',alpha=0.8)
# Effect of Specific Growth Rate on Fermentative Capacity of Baker’s Yeast#
axs[0, 2].legend(['van Hoek', 'Prok.', 'Euk.'])
axs[0, 2].set_title('Oxygen-uptake rate')
axs[0, 2].set_xlabel('$\mu$ [$h^{-1}$]')
axs[0, 2].set_ylabel('$J^{Ex}$ [$mmol * g^{-1}_{DW} * h^{-1}$]')
# plt.show()
# plt.savefig(pp, format='pdf')
# plt.figure()
axs[1, 0].plot(Mus_o2, EtOH_J, color='lightblue')
axs[1, 0].plot(Mu_Hackett, EtOH_Hackett, color='lightgreen')
axs[1, 0].scatter(Mus_predicted, EtOH_Exchange_predicted, color='black', alpha=0.8)
axs[1, 0].scatter(Mus_predicted, EtOH_Exchange_predicted_euk, color='red', alpha=0.8)
# plt.scatter(Mus_Euk,[abs(i) for i in FluxFile_Euk.loc['M_glc__D',conditions].values.tolist()],color='red',alpha=0.8)
# plt.scatter(Mus_testcase,[abs(i) for i in FluxFile_testcase.loc['M_glc__D',conditions].values.tolist()],color='orange',alpha=0.8)
# Effect of Specific Growth Rate on Fermentative Capacity of Baker’s Yeast#
axs[1, 0].legend(['van Hoek', 'Hackett', 'Prok.', 'Euk.'])
axs[1, 0].set_title('Ethanol-excretion rate')
axs[1, 0].set_xlabel('$\mu$ [$h^{-1}$]')
axs[1, 0].set_ylabel('$J^{Ex}$ [$mmol * g^{-1}_{DW} * h^{-1}$]')
# plt.show()
# plt.savefig(pp, format='pdf')
# plt.figure()
axs[1, 1].plot(Mus_o2, Ac_J, color='lightblue')
axs[1, 1].plot(Mu_Hackett, Ac_Hackett, color='lightgreen')
axs[1, 1].scatter(Mus_predicted, Ac_Exchange_predicted, color='black', alpha=0.8)
axs[1, 1].scatter(Mus_predicted, Ac_Exchange_predicted_euk, color='red', alpha=0.8)
# plt.scatter(Mus_Euk,[abs(i) for i in FluxFile_Euk.loc['M_ac',conditions].values.tolist()],color='red',alpha=0.8)
# plt.scatter(Mus_testcase,[abs(i) for i in FluxFile_testcase.loc['M_ac',conditions].values.tolist()],color='orange',alpha=0.8)
# Effect of Specific Growth Rate on Fermentative Capacity of Baker’s Yeast#
axs[1, 1].legend(['van Hoek', 'Hackett', 'Prok.', 'Euk.'])
axs[1, 1].set_title('Acetate-excretion rate')
axs[1, 1].set_xlabel('$\mu$ [$h^{-1}$]')
axs[1, 1].set_ylabel('$J^{Ex}$ [$mmol * g^{-1}_{DW} * h^{-1}$]')
# plt.show()
# plt.savefig(pp, format='pdf')
axs[1, 2].plot(Mus_o2, Glyc_J, color='lightblue')
axs[1, 2].plot(Mu_Hackett, Glyc_Hackett, color='lightgreen')
axs[1, 2].scatter(Mus_predicted, Glycerol_Exchange_predicted, color='black', alpha=0.8)
axs[1, 2].scatter(Mus_predicted, Glycerol_Exchange_predicted_euk, color='red', alpha=0.8)
# plt.scatter(Mus_Euk,[abs(i) for i in FluxFile_Euk.loc['M_ac',conditions].values.tolist()],color='red',alpha=0.8)
# plt.scatter(Mus_testcase,[abs(i) for i in FluxFile_testcase.loc['M_ac',conditions].values.tolist()],color='orange',alpha=0.8)
# Effect of Specific Growth Rate on Fermentative Capacity of Baker’s Yeast#
axs[1, 2].legend(['van Hoek', 'Hackett', 'Prok.', 'Euk.'])
axs[1, 2].set_title('Glycerol-excretion rate')
axs[1, 2].set_xlabel('$\mu$ [$h^{-1}$]')
axs[1, 2].set_ylabel('$J^{Ex}$ [$mmol * g^{-1}_{DW} * h^{-1}$]')
plt.show()
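# Compare predicted vs. measured proteome per condition. The factor 6.023e20
# used below is presumably Avogadro's number divided by 1000, i.e. it converts
# mmol into molecule counts so predictions and measurements share one scale.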
protein_comparison_005 = pandas.DataFrame()
for i in list(set(list(Calibration_Hackett_C005['Simulation_Results']['ProtoProteins'].index)+list(Calibration_Hackett_C005['Proteome']['ID']))):
protein_comparison_005.loc[i, 'ID'] = i
if i in list(Calibration_Hackett_C005['Simulation_Results']['ProtoProteins'].index):
protein_comparison_005.loc[i, 'Predicted'] = 6.023e20 * \
Calibration_Hackett_C005['Simulation_Results']['ProtoProteins'].loc[i].values[0]
if i in list(Calibration_Hackett_C005['Proteome']['ID']):
protein_comparison_005.loc[i, 'Measured'] = 6.023e20 * \
Calibration_Hackett_C005['Proteome'].loc[Calibration_Hackett_C005['Proteome']
['ID'] == i, 'copy_number'].values[0]
protein_comparison_01 = pandas.DataFrame()
for i in list(set(list(Calibration_Hackett_C01['Simulation_Results']['ProtoProteins'].index)+list(Calibration_Hackett_C01['Proteome']['ID']))):
protein_comparison_01.loc[i, 'ID'] = i
if i in list(Calibration_Hackett_C01['Simulation_Results']['ProtoProteins'].index):
protein_comparison_01.loc[i, 'Predicted'] = 6.023e20 * \
Calibration_Hackett_C01['Simulation_Results']['ProtoProteins'].loc[i].values[0]
if i in list(Calibration_Hackett_C01['Proteome']['ID']):
protein_comparison_01.loc[i, 'Measured'] = 6.023e20 * \
Calibration_Hackett_C01['Proteome'].loc[Calibration_Hackett_C01['Proteome']
['ID'] == i, 'copy_number'].values[0]
protein_comparison_016 = pandas.DataFrame()
for i in list(set(list(Calibration_Hackett_C016['Simulation_Results']['ProtoProteins'].index)+list(Calibration_Hackett_C016['Proteome']['ID']))):
protein_comparison_016.loc[i, 'ID'] = i
if i in list(Calibration_Hackett_C016['Simulation_Results']['ProtoProteins'].index):
protein_comparison_016.loc[i, 'Predicted'] = 6.023e20 * \
Calibration_Hackett_C016['Simulation_Results']['ProtoProteins'].loc[i].values[0]
if i in list(Calibration_Hackett_C016['Proteome']['ID']):
protein_comparison_016.loc[i, 'Measured'] = 6.023e20 * \
Calibration_Hackett_C016['Proteome'].loc[Calibration_Hackett_C016['Proteome']
['ID'] == i, 'copy_number'].values[0]
protein_comparison_022 = pandas.DataFrame()
for i in list(set(list(Calibration_Hackett_C022['Simulation_Results']['ProtoProteins'].index)+list(Calibration_Hackett_C022['Proteome']['ID']))):
protein_comparison_022.loc[i, 'ID'] = i
if i in list(Calibration_Hackett_C022['Simulation_Results']['ProtoProteins'].index):
protein_comparison_022.loc[i, 'Predicted'] = 6.023e20 * \
Calibration_Hackett_C022['Simulation_Results']['ProtoProteins'].loc[i].values[0]
if i in list(Calibration_Hackett_C022['Proteome']['ID']):
protein_comparison_022.loc[i, 'Measured'] = 6.023e20 * \
Calibration_Hackett_C022['Proteome'].loc[Calibration_Hackett_C022['Proteome']
['ID'] == i, 'copy_number'].values[0]
protein_comparison_03 = pandas.DataFrame()
for i in list(set(list(Calibration_Hackett_C03['Simulation_Results']['ProtoProteins'].index)+list(Calibration_Hackett_C03['Proteome']['ID']))):
protein_comparison_03.loc[i, 'ID'] = i
if i in list(Calibration_Hackett_C03['Simulation_Results']['ProtoProteins'].index):
protein_comparison_03.loc[i, 'Predicted'] = 6.023e20 * \
Calibration_Hackett_C03['Simulation_Results']['ProtoProteins'].loc[i].values[0]
if i in list(Calibration_Hackett_C03['Proteome']['ID']):
protein_comparison_03.loc[i, 'Measured'] = 6.023e20 * \
Calibration_Hackett_C03['Proteome'].loc[Calibration_Hackett_C03['Proteome']
['ID'] == i, 'copy_number'].values[0]
fig, axs = plt.subplots(2, 3, figsize=(28, 7), sharex=True)
predicted_proteins = protein_comparison_005.loc[(pandas.isna(protein_comparison_005['Predicted']) == False) & (
    pandas.isna(protein_comparison_005['Measured']) == False), 'Predicted']
quantified_proteins = protein_comparison_005.loc[(pandas.isna(protein_comparison_005['Predicted']) == False) & (
    pandas.isna(protein_comparison_005['Measured']) == False), 'Measured']
x_reg = numpy.reshape(numpy.array(list(predicted_proteins)), (len(list(predicted_proteins)), 1))
y_reg = numpy.reshape(numpy.array(list(quantified_proteins)), (len(list(quantified_proteins)), 1))
regressor = LinearRegression(fit_intercept=False)
regressor.fit(x_reg, y_reg)
predictions = regressor.predict(x_reg)
axs[0, 0].scatter(numpy.log10(protein_comparison_005['Predicted']),
numpy.log10(protein_comparison_005['Measured']))
axs[0, 0].plot([11, 17], [11, 17], color='green')
axs[0, 0].plot(numpy.log10(x_reg), numpy.log10(predictions), color='red', linewidth=2)
axs[0, 0].legend(['Identity', str(regressor.coef_), 'Data'])
axs[0, 0].set_title('Protein-Protein (Log10) 0.05')
axs[0, 0].set_xlabel('Predicted')
axs[0, 0].set_ylabel('Measured')
predicted_proteins = protein_comparison_01.loc[(pandas.isna(protein_comparison_01['Predicted']) == False) & (
    pandas.isna(protein_comparison_01['Measured']) == False), 'Predicted']
quantified_proteins = protein_comparison_01.loc[(pandas.isna(protein_comparison_01['Predicted']) == False) & (
    pandas.isna(protein_comparison_01['Measured']) == False), 'Measured']
from probatus.utils import preprocess_data, shap_calc, calculate_shap_importance, BaseFitComputePlotClass, \
preprocess_labels, get_single_scorer
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import RandomizedSearchCV, GridSearchCV, check_cv
from sklearn.base import clone, is_classifier
from joblib import Parallel, delayed
class ShapRFECV(BaseFitComputePlotClass):
"""
This class performs Backwards Recursive Feature Elimination, using SHAP feature importance. At each round, for a
given feature set, starting from all available features, the following steps are applied:
    1. (Optional) Tune the hyperparameters of the model using [GridSearchCV](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html)
        or [RandomizedSearchCV](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.RandomizedSearchCV.html),
2. Apply Cross-validation (CV) to estimate the SHAP feature importance on the provided dataset. In each CV
iteration, the model is fitted on the train folds, and applied on the validation fold to estimate
SHAP feature importance.
3. Remove `step` lowest SHAP importance features from the dataset.
At the end of the process, the user can plot the performance of the model for each iteration, and select the
optimal number of features and the features set.
The functionality is similar to [RFECV](https://scikit-learn.org/stable/modules/generated/sklearn.feature_selection.RFECV.html).
    The main difference is removing the lowest importance features based on SHAP feature importance. It also
    supports the use of [GridSearchCV](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html)
    and [RandomizedSearchCV](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.RandomizedSearchCV.html)
    passed as the `clf`, thanks to which you can perform hyperparameter optimization at each step of the search,
    tuning the model for each feature set. Lastly, it supports
categorical features (object and category dtype) and missing values in the data, as long as the model supports
them.
We recommend using [LGBMClassifier](https://lightgbm.readthedocs.io/en/latest/pythonapi/lightgbm.LGBMClassifier.html),
    because by default it handles missing values and categorical features. For other models, make sure to
    handle these issues for your dataset and consider the impact they might have on feature importance.
Example:
```python
import numpy as np
import pandas as pd
from probatus.feature_elimination import ShapRFECV
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import RandomizedSearchCV
feature_names = ['f1', 'f2', 'f3', 'f4', 'f5', 'f6', 'f7', 'f8', 'f9', 'f10', 'f11', 'f12', 'f13', 'f14', 'f15', 'f16', 'f17', 'f18', 'f19', 'f20']
# Prepare two samples
X, y = make_classification(n_samples=200, class_sep=0.05, n_informative=6, n_features=20,
random_state=0, n_redundant=10, n_clusters_per_class=1)
X = pd.DataFrame(X, columns=feature_names)
# Prepare model and parameter search space
clf = RandomForestClassifier(max_depth=5, class_weight='balanced')
param_grid = {
'n_estimators': [5, 7, 10],
'min_samples_leaf': [3, 5, 7, 10],
}
search = RandomizedSearchCV(clf, param_grid)
# Run feature elimination
shap_elimination = ShapRFECV(
clf=search, step=0.2, cv=10, scoring='roc_auc', n_jobs=3)
report = shap_elimination.fit_compute(X, y)
# Make plots
performance_plot = shap_elimination.plot()
# Get final feature set
final_features_set = shap_elimination.get_reduced_features_set(num_features=3)
```
<img src="../img/shaprfecv.png" width="500" />
"""
def __init__(self, clf, step=1, min_features_to_select=1, cv=None, scoring='roc_auc', n_jobs=-1, verbose=0,
random_state=None):
"""
This method initializes the class:
Args:
clf (binary classifier, GridSearchCV or RandomizedSearchCV):
                A model that will be optimized and trained at each round of feature elimination. The recommended model
                is [LGBMClassifier](https://lightgbm.readthedocs.io/en/latest/pythonapi/lightgbm.LGBMClassifier.html),
                because it handles missing values and categorical variables by default. This parameter also supports
[GridSearchCV](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html)
and [RandomizedSearchCV](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.RandomizedSearchCV.html).
step (int or float, optional):
                Number of lowest importance features removed each round. If it is an int, that number of features is
                discarded each round. If it is a float, that fraction of the remaining features (rounded down) is
                removed each iteration. Using a float is recommended, since it removes features quickly while many
                remain and becomes more precise as fewer features are left. Note: the last round may remove fewer
                features in order to reach min_features_to_select.
min_features_to_select (int, optional):
Minimum number of features to be kept. This is a stopping criterion of the feature elimination. By
default the process stops when one feature is left.
cv (int, cross-validation generator or an iterable, optional):
Determines the cross-validation splitting strategy. Compatible with sklearn
[cv parameter](https://scikit-learn.org/stable/modules/generated/sklearn.feature_selection.RFECV.html).
If None, then cv of 5 is used.
scoring (string or probatus.utils.Scorer, optional):
Metric for which the model performance is calculated. It can be either a metric name aligned with
                predefined [classification scorer names in sklearn](https://scikit-learn.org/stable/modules/model_evaluation.html).
Another option is using probatus.utils.Scorer to define a custom metric.
n_jobs (int, optional):
Number of cores to run in parallel while fitting across folds. None means 1 unless in a
`joblib.parallel_backend` context. -1 means using all processors.
verbose (int, optional):
Controls verbosity of the output:
                    - 0 - neither prints nor warnings are shown
- 1 - 50 - only most important warnings
- 51 - 100 - shows other warnings and prints
- above 100 - presents all prints and all warnings (including SHAP warnings).
random_state (int, optional):
                Random state set at each round of feature elimination. If it is None, the results will not be
                reproducible and, when random search is used, different hyperparameters might be tested at each
                iteration. For reproducible results set it to an integer.
"""
self.clf = clf
        if isinstance(self.clf, (RandomizedSearchCV, GridSearchCV)):
            self.search_clf = True
        else:
            self.search_clf = False
        if isinstance(step, (int, float)) and step > 0:
            self.step = step
        else:
            raise ValueError(f"The current value of step = {step} is not allowed. "
                             f"It needs to be a positive integer or positive float.")
        if isinstance(min_features_to_select, int) and min_features_to_select > 0:
            self.min_features_to_select = min_features_to_select
        else:
            raise ValueError(f"The current value of min_features_to_select = {min_features_to_select} is not allowed. "
                             f"It needs to be a positive integer.")
self.cv = cv
self.scorer = get_single_scorer(scoring)
self.random_state = random_state
self.n_jobs = n_jobs
self.report_df = pd.DataFrame([])
self.verbose = verbose
def _get_current_features_to_remove(self, shap_importance_df):
"""
        Implements the logic used to determine which features to remove. If step is a positive integer,
        the step lowest SHAP-importance features are removed each round. If it is a float, that fraction
        of the remaining features (rounded down) is removed each iteration. Using a float is recommended,
        since it removes features quickly while many remain and becomes more precise as fewer features are left.
Args:
shap_importance_df (pd.DataFrame):
DataFrame presenting SHAP importance of remaining features.
Returns:
(list):
List of features to be removed at a given round.
"""
# If the step is an int remove n features.
if isinstance(self.step, int):
num_features_to_remove = self._calculate_number_of_features_to_remove(
current_num_of_features=shap_importance_df.shape[0],
num_features_to_remove=self.step,
min_num_features_to_keep=self.min_features_to_select
)
# If the step is a float remove n * number features that are left, rounded down
elif isinstance(self.step, float):
current_step = int(np.floor(shap_importance_df.shape[0] * self.step))
# The step after rounding down should be at least 1
if current_step < 1:
current_step = 1
num_features_to_remove = self._calculate_number_of_features_to_remove(
current_num_of_features=shap_importance_df.shape[0],
num_features_to_remove=current_step,
min_num_features_to_keep=self.min_features_to_select
)
if num_features_to_remove == 0:
return []
else:
return shap_importance_df.iloc[-num_features_to_remove:].index.tolist()
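        # Example for the float step above (illustrative): with step=0.2 and
        # 18 features remaining, int(np.floor(18 * 0.2)) == 3, so the 3
        # lowest-importance features are returned for removal.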
@staticmethod
def _calculate_number_of_features_to_remove(current_num_of_features, num_features_to_remove,
min_num_features_to_keep):
"""
Calculates the number of features to be removed, and makes sure that after removal at least
min_num_features_to_keep are kept
Args:
current_num_of_features (int):
Current number of features in the data.
num_features_to_remove (int):
Number of features to be removed at this stage.
min_num_features_to_keep (int):
Minimum number of features to be left after removal.
Returns:
(int):
Number of features to be removed.
"""
num_features_after_removal = current_num_of_features - num_features_to_remove
if num_features_after_removal >= min_num_features_to_keep:
num_to_remove = num_features_to_remove
else:
# take all available features minus number of them that should stay
num_to_remove = current_num_of_features - min_num_features_to_keep
return num_to_remove
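        # Example (illustrative): current_num_of_features=10,
        # num_features_to_remove=3, min_num_features_to_keep=8 -> removing 3
        # would leave 7 < 8, so only 10 - 8 = 2 features are removed.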
def _report_current_results(self, round_number, current_features_set, features_to_remove, train_metric_mean,
train_metric_std, val_metric_mean, val_metric_std):
"""
This function adds the results from a current iteration to the report.
Args:
round_number (int):
Current number of the round.
current_features_set (list of str):
Current list of features.
features_to_remove (list of str):
List of features to be removed at the end of this iteration.
train_metric_mean (float or int):
Mean scoring metric measured on train set during CV.
train_metric_std (float or int):
Std scoring metric measured on train set during CV.
val_metric_mean (float or int):
Mean scoring metric measured on validation set during CV.
val_metric_std (float or int):
Std scoring metric measured on validation set during CV.
"""
current_results = {
'num_features': len(current_features_set),
'features_set': None,
'eliminated_features': None,
'train_metric_mean': train_metric_mean,
'train_metric_std': train_metric_std,
'val_metric_mean': val_metric_mean,
'val_metric_std': val_metric_std,
}
        current_row = pd.DataFrame(current_results, index=[round_number])
from datetime import datetime
import numpy as np
from pandas.tseries.frequencies import get_freq_code as _gfc
from pandas.tseries.index import DatetimeIndex, Int64Index
from pandas.tseries.tools import parse_time_string
import pandas.tseries.frequencies as _freq_mod
import pandas.core.common as com
import pandas.core.datetools as datetools
from pandas._tseries import Timestamp
import pandas._tseries as lib
#---------------
# Period logic
def to_period(arg, freq=None):
""" Attempts to convert arg to timestamp """
if arg is None:
return arg
if type(arg) == float:
raise TypeError("Cannot convert a float to period")
return Period(arg, freq=freq)
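# Illustrative usage (string forms as shown in the Period docstring below):
# to_period('4Q2005') and to_period('2005-11', freq='M') return Period objects,
# while to_period(None) passes None through unchanged.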
class Period(object):
def __init__(self, value=None, freq=None,
year=None, month=1, quarter=None, day=1,
hour=0, minute=0, second=0):
"""
        Represents a period of time
Parameters
----------
value : Period or basestring, default None
The time period represented (e.g., '4Q2005')
freq : str, default None
e.g., 'B' for businessday, ('T', 5) or '5T' for 5 minutes
year : int, default None
month : int, default 1
quarter : int, default None
day : int, default 1
hour : int, default 0
minute : int, default 0
second : int, default 0
"""
# freq points to a tuple (base, mult); base is one of the defined
# periods such as A, Q, etc. Every five minutes would be, e.g.,
# ('T', 5) but may be passed in as a string like '5T'
self.freq = None
# ordinal is the period offset from the gregorian proleptic epoch
self.ordinal = None
if value is None:
if freq is None:
raise ValueError("If value is None, freq cannot be None")
if year is None:
raise ValueError("If value is None, year cannot be None")
if quarter is not None:
month = (quarter - 1) * 3 + 1
base, mult = _gfc(freq)
self.ordinal = lib.period_ordinal(year, month, day, hour, minute,
second, base, mult)
elif isinstance(value, Period):
other = value
if freq is None or _gfc(freq) == _gfc(other.freq):
self.ordinal = other.ordinal
freq = other.freq
else:
converted = other.asfreq(freq)
self.ordinal = converted.ordinal
elif isinstance(value, basestring):
value = value.upper()
dt, parsed, reso = parse_time_string(value)
if freq is None:
if reso == 'year':
freq = 'A'
elif reso == 'quarter':
freq = 'Q'
elif reso == 'month':
freq = 'M'
elif reso == 'day':
freq = 'D'
elif reso == 'hour':
freq = 'H'
elif reso == 'minute':
freq = 'T'
elif reso == 'second':
freq = 'S'
else:
raise ValueError("Could not infer frequency for period")
elif isinstance(value, datetime):
dt = value
if freq is None:
raise ValueError('Must supply freq for datetime value')
elif isinstance(value, (int, long)):
if value <= 0:
raise ValueError("Value must be positive")
self.ordinal = value
if freq is None:
raise ValueError('Must supply freq for ordinal value')
else:
msg = "Value must be Period, string, integer, or datetime"
raise ValueError(msg)
base, mult = _gfc(freq)
if self.ordinal is None:
self.ordinal = lib.period_ordinal(dt.year, dt.month, dt.day, dt.hour,
dt.minute, dt.second, base, mult)
self.freq = _freq_mod._get_freq_str(base, mult)
def __eq__(self, other):
if isinstance(other, Period):
return (self.ordinal == other.ordinal
and _gfc(self.freq) == _gfc(other.freq))
return False
def __add__(self, other):
if isinstance(other, (int, long)):
return Period(self.ordinal + other, self.freq)
raise ValueError("Cannot add with non-integer value")
def __sub__(self, other):
if isinstance(other, (int, long)):
return Period(self.ordinal - other, self.freq)
if isinstance(other, Period):
if other.freq != self.freq:
raise ValueError("Cannot do arithmetic with "
"non-conforming periods")
return self.ordinal - other.ordinal
raise ValueError("Cannot sub with non-integer value")
def asfreq(self, freq=None, how='E'):
"""
Parameters
----------
        freq : string
            Target frequency (e.g. 'A', 'Q', 'M', 'D')
        how : {'E', 'S'}, default 'E'
            Whether the result should refer to the end ('E') or the
            start ('S') of the period
Returns
-------
resampled : Period
"""
how = _validate_end_alias(how)
base1, mult1 = _gfc(self.freq)
base2, mult2 = _gfc(freq)
new_ordinal = lib.period_asfreq(self.ordinal, base1, mult1,
base2, mult2, how)
return Period(new_ordinal, (base2, mult2))
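    # Illustrative example for asfreq above: a quarterly period converted to
    # monthly with how='E' maps to the last month of the quarter, and with
    # how='S' to the first month.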
def start_time(self):
return self.to_timestamp(which_end='S')
def end_time(self):
return self.to_timestamp(which_end='E')
def to_timestamp(self, which_end='S'):
"""
Return the Timestamp at the start/end of the period
Parameters
----------
which_end: str, default 'S' (start)
'S', 'E'. Can be aliased as case insensitive
'Start', 'Finish', 'Begin', 'End'
Returns
-------
Timestamp
"""
which_end = _validate_end_alias(which_end)
new_val = self.asfreq('S', which_end)
base, mult = _gfc(new_val.freq)
return Timestamp(lib.period_ordinal_to_dt64(new_val.ordinal, base, mult))
@property
def year(self):
base, mult = _gfc(self.freq)
return lib.get_period_year(self.ordinal, base, mult)
@property
def month(self):
base, mult = _gfc(self.freq)
return lib.get_period_month(self.ordinal, base, mult)
@property
def qyear(self):
base, mult = _gfc(self.freq)
return lib.get_period_qyear(self.ordinal, base, mult)
@property
def quarter(self):
base, mult = _gfc(self.freq)
return lib.get_period_quarter(self.ordinal, base, mult)
@property
def day(self):
base, mult = _gfc(self.freq)
return lib.get_period_day(self.ordinal, base, mult)
@property
def week(self):
base, mult = _gfc(self.freq)
return lib.get_period_week(self.ordinal, base, mult)
@property
def weekday(self):
base, mult = _gfc(self.freq)
return lib.get_period_weekday(self.ordinal, base, mult)
@property
def day_of_week(self):
base, mult = _gfc(self.freq)
return lib.get_period_dow(self.ordinal, base, mult)
@property
def day_of_year(self):
base, mult = _gfc(self.freq)
return lib.get_period_doy(self.ordinal, base, mult)
@property
def hour(self):
base, mult = _gfc(self.freq)
return lib.get_period_hour(self.ordinal, base, mult)
@property
def minute(self):
base, mult = _gfc(self.freq)
return lib.get_period_minute(self.ordinal, base, mult)
@property
def second(self):
base, mult = _gfc(self.freq)
return lib.get_period_second(self.ordinal, base, mult)
@classmethod
def now(cls, freq=None):
return Period(datetime.now(), freq=freq)
def __repr__(self):
base, mult = _gfc(self.freq)
formatted = lib.period_ordinal_to_string(self.ordinal, base, mult)
freqstr = _freq_mod._reverse_period_code_map[base]
if mult == 1:
return "Period('%s', '%s')" % (formatted, freqstr)
return ("Period('%s', '%d%s')" % (formatted, mult, freqstr))
def __str__(self):
base, mult = _gfc(self.freq)
formatted = lib.period_ordinal_to_string(self.ordinal, base, mult)
return ("%s" % formatted)
def strftime(self, fmt):
"""
Returns the string representation of the :class:`Period`, depending
on the selected :keyword:`format`. :keyword:`format` must be a string
containing one or several directives. The method recognizes the same
directives as the :func:`time.strftime` function of the standard Python
distribution, as well as the specific additional directives ``%f``,
``%F``, ``%q``. (formatting & docs originally from scikits.timeries)
+-----------+--------------------------------+-------+
| Directive | Meaning | Notes |
+===========+================================+=======+
| ``%a`` | Locale's abbreviated weekday | |
| | name. | |
+-----------+--------------------------------+-------+
| ``%A`` | Locale's full weekday name. | |
+-----------+--------------------------------+-------+
| ``%b`` | Locale's abbreviated month | |
| | name. | |
+-----------+--------------------------------+-------+
| ``%B`` | Locale's full month name. | |
+-----------+--------------------------------+-------+
| ``%c`` | Locale's appropriate date and | |
| | time representation. | |
+-----------+--------------------------------+-------+
| ``%d`` | Day of the month as a decimal | |
| | number [01,31]. | |
+-----------+--------------------------------+-------+
| ``%f`` | 'Fiscal' year without a | \(1) |
| | century as a decimal number | |
| | [00,99] | |
+-----------+--------------------------------+-------+
| ``%F`` | 'Fiscal' year with a century | \(2) |
| | as a decimal number | |
+-----------+--------------------------------+-------+
| ``%H`` | Hour (24-hour clock) as a | |
| | decimal number [00,23]. | |
+-----------+--------------------------------+-------+
| ``%I`` | Hour (12-hour clock) as a | |
| | decimal number [01,12]. | |
+-----------+--------------------------------+-------+
| ``%j`` | Day of the year as a decimal | |
| | number [001,366]. | |
+-----------+--------------------------------+-------+
| ``%m`` | Month as a decimal number | |
| | [01,12]. | |
+-----------+--------------------------------+-------+
| ``%M`` | Minute as a decimal number | |
| | [00,59]. | |
+-----------+--------------------------------+-------+
| ``%p`` | Locale's equivalent of either | \(3) |
| | AM or PM. | |
+-----------+--------------------------------+-------+
| ``%q`` | Quarter as a decimal number | |
| | [01,04] | |
+-----------+--------------------------------+-------+
| ``%S`` | Second as a decimal number | \(4) |
| | [00,61]. | |
+-----------+--------------------------------+-------+
| ``%U`` | Week number of the year | \(5) |
| | (Sunday as the first day of | |
| | the week) as a decimal number | |
| | [00,53]. All days in a new | |
| | year preceding the first | |
| | Sunday are considered to be in | |
| | week 0. | |
+-----------+--------------------------------+-------+
| ``%w`` | Weekday as a decimal number | |
| | [0(Sunday),6]. | |
+-----------+--------------------------------+-------+
| ``%W`` | Week number of the year | \(5) |
| | (Monday as the first day of | |
| | the week) as a decimal number | |
| | [00,53]. All days in a new | |
| | year preceding the first | |
| | Monday are considered to be in | |
| | week 0. | |
+-----------+--------------------------------+-------+
| ``%x`` | Locale's appropriate date | |
| | representation. | |
+-----------+--------------------------------+-------+
| ``%X`` | Locale's appropriate time | |
| | representation. | |
+-----------+--------------------------------+-------+
| ``%y`` | Year without century as a | |
| | decimal number [00,99]. | |
+-----------+--------------------------------+-------+
| ``%Y`` | Year with century as a decimal | |
| | number. | |
+-----------+--------------------------------+-------+
| ``%Z`` | Time zone name (no characters | |
| | if no time zone exists). | |
+-----------+--------------------------------+-------+
| ``%%`` | A literal ``'%'`` character. | |
+-----------+--------------------------------+-------+
.. note::
(1)
The ``%f`` directive is the same as ``%y`` if the frequency is
not quarterly.
Otherwise, it corresponds to the 'fiscal' year, as defined by
the :attr:`qyear` attribute.
(2)
The ``%F`` directive is the same as ``%Y`` if the frequency is
not quarterly.
Otherwise, it corresponds to the 'fiscal' year, as defined by
the :attr:`qyear` attribute.
(3)
The ``%p`` directive only affects the output hour field
if the ``%I`` directive is used to parse the hour.
(4)
The range really is ``0`` to ``61``; this accounts for leap
seconds and the (very rare) double leap seconds.
(5)
The ``%U`` and ``%W`` directives are only used in calculations
when the day of the week and the year are specified.
.. rubric:: Examples
>>> a = Period(freq='Q@JUL', year=2006, quarter=1)
>>> a.strftime('%F-Q%q')
'2006-Q1'
>>> # Output the last month in the quarter of this date
>>> a.strftime('%b-%Y')
'Oct-2005'
>>>
>>> a = Period(freq='D', year=2001, month=1, day=1)
>>> a.strftime('%d-%b-%Y')
        '01-Jan-2001'
>>> a.strftime('%b. %d, %Y was a %A')
'Jan. 01, 2001 was a Monday'
"""
base, mult = _gfc(self.freq)
if fmt is not None:
return lib.period_strftime(self.ordinal, base, mult, fmt)
else:
return lib.period_ordinal_to_string(self.ordinal, base, mult)
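# The helpers below convert between Period objects and their int64 ordinals so
# that whole arrays of periods can be boxed/unboxed elementwise via
# np.frompyfunc.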
def _period_unbox(key, check=None):
'''
Period-like => int64
'''
if not isinstance(key, Period):
key = Period(key, freq=check)
elif check is not None:
if key.freq != check:
raise ValueError("%s is wrong freq" % key)
return np.int64(key.ordinal)
def _period_unbox_array(arr, check=None):
if arr is None:
return arr
unboxer = np.frompyfunc(lambda x: _period_unbox(x, check=check), 1, 1)
return unboxer(arr)
def _period_box(val, freq):
return Period(val, freq=freq)
def _period_box_array(arr, freq):
if arr is None:
return arr
if not isinstance(arr, np.ndarray):
return arr
boxfunc = lambda x: _period_box(x, freq)
boxer = np.frompyfunc(boxfunc, 1, 1)
return boxer(arr)
def dt64arr_to_periodarr(data, freq):
if data is None:
return data
if isinstance(freq, basestring):
    base, mult = _gfc(freq)
import numpy as np
import pandas as pd
from aif360.datasets import BinaryLabelDataset
from aif360.datasets.multiclass_label_dataset import MulticlassLabelDataset
from aif360.metrics import ClassificationMetric
def test_generalized_entropy_index():
data = np.array([[0, 1],
[0, 0],
[1, 0],
[1, 1],
[1, 0],
[1, 0],
[2, 1],
[2, 0],
[2, 1],
[2, 1]])
pred = data.copy()
pred[[3, 9], -1] = 0
pred[[4, 5], -1] = 1
df = pd.DataFrame(data, columns=['feat', 'label'])
df2 = pd.DataFrame(pred, columns=['feat', 'label'])
bld = BinaryLabelDataset(df=df, label_names=['label'],
protected_attribute_names=['feat'])
bld2 = BinaryLabelDataset(df=df2, label_names=['label'],
protected_attribute_names=['feat'])
cm = ClassificationMetric(bld, bld2)
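    # Sanity check of the expected value (illustrative): with benefit
    # b_i = yhat_i - y_i + 1, two individuals have b=2, two have b=0 and the
    # rest b=1, so GEI(alpha=2) = sum(b_i**2 - 1) / (2 * n) = 4 / 20 = 0.2.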
assert cm.generalized_entropy_index() == 0.2
pred = data.copy()
pred[:, -1] = np.array([0, 1, 1, 0, 0, 0, 0, 1, 1, 1])
df2 = pd.DataFrame(pred, columns=['feat', 'label'])
bld2 = BinaryLabelDataset(df=df2, label_names=['label'],
protected_attribute_names=['feat'])
cm = ClassificationMetric(bld, bld2)
assert cm.generalized_entropy_index() == 0.3
def test_theil_index():
data = np.array([[0, 1],
[0, 0],
[1, 0],
[1, 1],
[1, 0],
[1, 0],
[2, 1],
[2, 0],
[2, 1],
[2, 1]])
pred = data.copy()
pred[[3, 9], -1] = 0
pred[[4, 5], -1] = 1
df = pd.DataFrame(data, columns=['feat', 'label'])
df2 = pd.DataFrame(pred, columns=['feat', 'label'])
bld = BinaryLabelDataset(df=df, label_names=['label'],
protected_attribute_names=['feat'])
bld2 = BinaryLabelDataset(df=df2, label_names=['label'],
protected_attribute_names=['feat'])
cm = ClassificationMetric(bld, bld2)
assert cm.theil_index() == 4*np.log(2)/10
def test_between_all_groups():
data = np.array([[0, 1],
[0, 0],
[1, 0],
[1, 1],
[1, 0],
[1, 0],
[2, 1],
[2, 0],
[2, 1],
[2, 1]])
pred = data.copy()
pred[[3, 9], -1] = 0
pred[[4, 5], -1] = 1
df = pd.DataFrame(data, columns=['feat', 'label'])
df2 = pd.DataFrame(pred, columns=['feat', 'label'])
bld = BinaryLabelDataset(df=df, label_names=['label'],
protected_attribute_names=['feat'])
bld2 = BinaryLabelDataset(df=df2, label_names=['label'],
protected_attribute_names=['feat'])
cm = ClassificationMetric(bld, bld2)
b = np.array([1, 1, 1.25, 1.25, 1.25, 1.25, 0.75, 0.75, 0.75, 0.75])
assert cm.between_all_groups_generalized_entropy_index() == 1/20*np.sum(b**2 - 1)
def test_between_group():
data = np.array([[0, 0, 1],
[0, 1, 0],
[1, 1, 0],
[1, 1, 1],
[1, 0, 0],
[1, 0, 0]])
pred = data.copy()
pred[[0, 3], -1] = 0
pred[[4, 5], -1] = 1
df = pd.DataFrame(data, columns=['feat', 'feat2', 'label'])
df2 = pd.DataFrame(pred, columns=['feat', 'feat2', 'label'])
bld = BinaryLabelDataset(df=df, label_names=['label'],
protected_attribute_names=['feat', 'feat2'])
bld2 = BinaryLabelDataset(df=df2, label_names=['label'],
protected_attribute_names=['feat', 'feat2'])
cm = ClassificationMetric(bld, bld2, unprivileged_groups=[{'feat': 0}],
privileged_groups=[{'feat': 1}])
b = np.array([0.5, 0.5, 1.25, 1.25, 1.25, 1.25])
assert cm.between_group_generalized_entropy_index() == 1/12*np.sum(b**2 - 1)
def test_multiclass_confusion_matrix():
data = np.array([[0, 1],
[0, 0],
[1, 0],
[1, 1],
[1, 0],
[1, 2],
[2, 1],
[2, 0],
[2, 2],
[2, 1]])
pred = data.copy()
pred[3,1] = 0
pred[4,1] = 2
df = pd.DataFrame(data, columns=['feat', 'label'])
df2 = pd.DataFrame(pred, columns=['feat', 'label'])
favorable_values = [0,1]
unfavorable_values = [2]
    mcld = MulticlassLabelDataset(favorable_label=favorable_values, unfavorable_label=unfavorable_values, df=df, label_names=['label'],
                                  protected_attribute_names=['feat'])
    mcld2 = MulticlassLabelDataset(favorable_label=favorable_values, unfavorable_label=unfavorable_values, df=df2, label_names=['label'],
                                   protected_attribute_names=['feat'])
cm = ClassificationMetric(mcld, mcld2, unprivileged_groups=[{'feat': 2}],
privileged_groups=[{'feat': 0},{'feat': 1}])
confusion_matrix = cm.binary_confusion_matrix()
actual_labels_df = df[['label']].values
actual_labels_df2 = df2[['label']].values
assert np.all(actual_labels_df == mcld.labels)
assert np.all(actual_labels_df2 == mcld2.labels)
assert confusion_matrix == {'TP': 7.0, 'FN': 1.0, 'TN': 2.0, 'FP': 0.0}
fnr = cm.false_negative_rate_difference()
assert fnr == -0.2
def test_generalized_binary_confusion_matrix():
data = np.array([[0, 1],
[0, 0],
[1, 0],
[1, 1],
[1, 0],
[1, 0],
[1, 2],
[0, 0],
[0, 0],
[1, 2]])
pred = np.array([[0, 1, 0.8],
[0, 0, 0.6],
[1, 0, 0.7],
[1, 1, 0.8],
[1, 2, 0.36],
[1, 0, 0.82],
[1, 1, 0.79],
[0, 2, 0.42],
[0, 1, 0.81],
[1, 2, 0.3]])
    df = pd.DataFrame(data, columns=['feat', 'label'])
#!/usr/bin/env python3
"""Combine 2d and 3d hotspot files. Add mutation type counts per hotspot. Then filter out the hotspots not following the threshold."""
import argparse
import pandas as pd
import numpy as np
import sys
import re
import hgvs.parser
PARSER = hgvs.parser.Parser()
def count_variant_types(row):
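    """Count mutation types (missense, truncating, in-frame indel, splice) plus
    the total count from a hotspot row's variant_amino_acid field; 3d hotspots
    are skipped (all counts returned as NaN). Illustrative example: a
    'single residue' row at residue V600 with variant_amino_acid 'E:10|K:2|*:1'
    yields 12 missense, 1 trunc and a total of 13."""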
MISSENSE_INDEX = 0
TRUNC_INDEX = 1
# FS_INDEX = 2
INFRAME_INDEX = 2
SPLICE_INDEX = 3
TOTAL_INDEX = 4
INDEX_NAMES = "missense trunc inframe splice total".split()
rv = [0, 0, 0, 0, 0]
if row.type == "3d":
# ignore 3d hotspot
return pd.Series([np.nan]*len(INDEX_NAMES), index=INDEX_NAMES)
for v in row.variant_amino_acid.split("|"):
# add total
rv[TOTAL_INDEX] += int(v.split(":")[1])
if "*" in v:
rv[TRUNC_INDEX] += int(v.split(":")[1])
elif "sp" in v:
rv[SPLICE_INDEX] += int(v.split(":")[1])
else:
# parse hgvs to get protein change length (can always use TP53, don't care about which protein)
if row.type == "in-frame indel":
sv = PARSER.parse_hgvs_variant("TP53:p.{}".format(v.split(":")[0]))
elif row.type == "single residue":
sv = PARSER.parse_hgvs_variant("TP53:p.{}{}".format(row.residue, v.split(":")[0]))
else:
raise(Exception("unknown Type: {}".format(row.type)))
if sv.posedit.pos.start == sv.posedit.pos.end and sv.posedit.length_change() == 0:
rv[MISSENSE_INDEX] += int(v.split(":")[1])
else:
rv[INFRAME_INDEX] += int(v.split(":")[1])
return pd.Series(rv, index=INDEX_NAMES)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("hotspots_2d", default="../data/hotspots/v2_multi_type_residue.txt", type=str, help="2D cancerhotspots data file")
parser.add_argument("hotspots_3d", default="../data/hotspots/3d_hotspots.txt", type=str, help="3D cancerhotspots data file")
parser.add_argument("--removed_hotspots", default=None, type=str, help='Output removed hotspots')
parser.add_argument("--override_unassigned_transcript_id_2d_hotspots", default=None, required=True, type=str,
help='Override transcript_id field for 2d hotspots without assignment')
parser.add_argument("--override_unassigned_transcript_id_3d_hotspots", default=None, required=True, type=str,
help='Override transcript_id field for 3d hotspots without assignment')
args = parser.parse_args()
    hotspots_2d = pd.read_csv(args.hotspots_2d, sep="\t")
from datetime import datetime
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas.core.dtypes.base import _registry as ea_registry
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_interval_dtype,
is_object_dtype,
)
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
DatetimeTZDtype,
IntervalDtype,
PeriodDtype,
)
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
Interval,
IntervalIndex,
MultiIndex,
NaT,
Period,
PeriodIndex,
Series,
Timestamp,
cut,
date_range,
notna,
period_range,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
from pandas.tseries.offsets import BDay
class TestDataFrameSetItem:
@pytest.mark.parametrize("dtype", ["int32", "int64", "float32", "float64"])
def test_setitem_dtype(self, dtype, float_frame):
arr = np.random.randn(len(float_frame))
float_frame[dtype] = np.array(arr, dtype=dtype)
assert float_frame[dtype].dtype.name == dtype
def test_setitem_list_not_dataframe(self, float_frame):
data = np.random.randn(len(float_frame), 2)
float_frame[["A", "B"]] = data
tm.assert_almost_equal(float_frame[["A", "B"]].values, data)
    def test_setitem_error_msgs(self):
# GH 7432
df = DataFrame(
{"bar": [1, 2, 3], "baz": ["d", "e", "f"]},
index=Index(["a", "b", "c"], name="foo"),
)
ser = Series(
["g", "h", "i", "j"],
index=Index(["a", "b", "c", "a"], name="foo"),
name="fiz",
)
msg = "cannot reindex from a duplicate axis"
with pytest.raises(ValueError, match=msg):
df["newcol"] = ser
# GH 4107, more descriptive error message
df = DataFrame(np.random.randint(0, 2, (4, 4)), columns=["a", "b", "c", "d"])
msg = "incompatible index of inserted column with frame index"
with pytest.raises(TypeError, match=msg):
df["gr"] = df.groupby(["b", "c"]).count()
def test_setitem_benchmark(self):
# from the vb_suite/frame_methods/frame_insert_columns
N = 10
K = 5
df = DataFrame(index=range(N))
new_col = np.random.randn(N)
for i in range(K):
df[i] = new_col
expected = DataFrame(np.repeat(new_col, K).reshape(N, K), index=range(N))
tm.assert_frame_equal(df, expected)
def test_setitem_different_dtype(self):
df = DataFrame(
np.random.randn(5, 3), index=np.arange(5), columns=["c", "b", "a"]
)
df.insert(0, "foo", df["a"])
df.insert(2, "bar", df["c"])
# diff dtype
# new item
df["x"] = df["a"].astype("float32")
result = df.dtypes
expected = Series(
[np.dtype("float64")] * 5 + [np.dtype("float32")],
index=["foo", "c", "bar", "b", "a", "x"],
)
tm.assert_series_equal(result, expected)
# replacing current (in different block)
df["a"] = df["a"].astype("float32")
result = df.dtypes
expected = Series(
[np.dtype("float64")] * 4 + [np.dtype("float32")] * 2,
index=["foo", "c", "bar", "b", "a", "x"],
)
tm.assert_series_equal(result, expected)
df["y"] = df["a"].astype("int32")
result = df.dtypes
expected = Series(
[np.dtype("float64")] * 4 + [np.dtype("float32")] * 2 + [np.dtype("int32")],
index=["foo", "c", "bar", "b", "a", "x", "y"],
)
tm.assert_series_equal(result, expected)
def test_setitem_empty_columns(self):
# GH 13522
df = DataFrame(index=["A", "B", "C"])
df["X"] = df.index
df["X"] = ["x", "y", "z"]
exp = DataFrame(data={"X": ["x", "y", "z"]}, index=["A", "B", "C"])
tm.assert_frame_equal(df, exp)
def test_setitem_dt64_index_empty_columns(self):
rng = date_range("1/1/2000 00:00:00", "1/1/2000 1:59:50", freq="10s")
df = DataFrame(index=np.arange(len(rng)))
df["A"] = rng
assert df["A"].dtype == np.dtype("M8[ns]")
def test_setitem_timestamp_empty_columns(self):
# GH#19843
df = DataFrame(index=range(3))
df["now"] = Timestamp("20130101", tz="UTC")
expected = DataFrame(
[[Timestamp("20130101", tz="UTC")]] * 3, index=[0, 1, 2], columns=["now"]
)
tm.assert_frame_equal(df, expected)
def test_setitem_wrong_length_categorical_dtype_raises(self):
# GH#29523
cat = Categorical.from_codes([0, 1, 1, 0, 1, 2], ["a", "b", "c"])
df = DataFrame(range(10), columns=["bar"])
msg = (
rf"Length of values \({len(cat)}\) "
rf"does not match length of index \({len(df)}\)"
)
with pytest.raises(ValueError, match=msg):
df["foo"] = cat
def test_setitem_with_sparse_value(self):
# GH#8131
df = DataFrame({"c_1": ["a", "b", "c"], "n_1": [1.0, 2.0, 3.0]})
sp_array = SparseArray([0, 0, 1])
df["new_column"] = sp_array
expected = Series(sp_array, name="new_column")
tm.assert_series_equal(df["new_column"], expected)
def test_setitem_with_unaligned_sparse_value(self):
df = DataFrame({"c_1": ["a", "b", "c"], "n_1": [1.0, 2.0, 3.0]})
sp_series = Series(SparseArray([0, 0, 1]), index=[2, 1, 0])
df["new_column"] = sp_series
expected = Series(SparseArray([1, 0, 0]), name="new_column")
tm.assert_series_equal(df["new_column"], expected)
def test_setitem_dict_preserves_dtypes(self):
# https://github.com/pandas-dev/pandas/issues/34573
expected = DataFrame(
{
"a": Series([0, 1, 2], dtype="int64"),
"b": Series([1, 2, 3], dtype=float),
"c": Series([1, 2, 3], dtype=float),
}
)
df = DataFrame(
{
"a": Series([], dtype="int64"),
"b": Series([], dtype=float),
"c": Series([], dtype=float),
}
)
for idx, b in enumerate([1, 2, 3]):
df.loc[df.shape[0]] = {"a": int(idx), "b": float(b), "c": float(b)}
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize(
"obj,dtype",
[
(Period("2020-01"), PeriodDtype("M")),
(Interval(left=0, right=5), IntervalDtype("int64", "right")),
(
Timestamp("2011-01-01", tz="US/Eastern"),
DatetimeTZDtype(tz="US/Eastern"),
),
],
)
def test_setitem_extension_types(self, obj, dtype):
# GH: 34832
expected = DataFrame({"idx": [1, 2, 3], "obj": Series([obj] * 3, dtype=dtype)})
df = DataFrame({"idx": [1, 2, 3]})
df["obj"] = obj
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize(
"ea_name",
[
dtype.name
for dtype in ea_registry.dtypes
# property would require instantiation
if not isinstance(dtype.name, property)
]
# mypy doesn't allow adding lists of different types
# https://github.com/python/mypy/issues/5492
+ ["datetime64[ns, UTC]", "period[D]"], # type: ignore[list-item]
)
def test_setitem_with_ea_name(self, ea_name):
# GH 38386
result = DataFrame([0])
result[ea_name] = [1]
expected = DataFrame({0: [0], ea_name: [1]})
tm.assert_frame_equal(result, expected)
def test_setitem_dt64_ndarray_with_NaT_and_diff_time_units(self):
# GH#7492
data_ns = np.array([1, "nat"], dtype="datetime64[ns]")
result = Series(data_ns).to_frame()
result["new"] = data_ns
expected = DataFrame({0: [1, None], "new": [1, None]}, dtype="datetime64[ns]")
tm.assert_frame_equal(result, expected)
# OutOfBoundsDatetime error shouldn't occur
data_s = np.array([1, "nat"], dtype="datetime64[s]")
result["new"] = data_s
expected = DataFrame({0: [1, None], "new": [1e9, None]}, dtype="datetime64[ns]")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("unit", ["h", "m", "s", "ms", "D", "M", "Y"])
def test_frame_setitem_datetime64_col_other_units(self, unit):
# Check that non-nano dt64 values get cast to dt64 on setitem
# into a not-yet-existing column
n = 100
dtype = np.dtype(f"M8[{unit}]")
vals = np.arange(n, dtype=np.int64).view(dtype)
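        # .view() reinterprets the int64 range as datetime64 values in the
        # requested unit without copying; ex_vals casts them to nanosecond
        # precision, which is what setitem is expected to produce.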
ex_vals = vals.astype("datetime64[ns]")
df = DataFrame({"ints": np.arange(n)}, index=np.arange(n))
df[unit] = vals
assert df[unit].dtype == np.dtype("M8[ns]")
assert (df[unit].values == ex_vals).all()
@pytest.mark.parametrize("unit", ["h", "m", "s", "ms", "D", "M", "Y"])
def test_frame_setitem_existing_datetime64_col_other_units(self, unit):
# Check that non-nano dt64 values get cast to dt64 on setitem
# into an already-existing dt64 column
n = 100
dtype = np.dtype(f"M8[{unit}]")
vals = np.arange(n, dtype=np.int64).view(dtype)
ex_vals = vals.astype("datetime64[ns]")
df = DataFrame({"ints": np.arange(n)}, index=np.arange(n))
df["dates"] = np.arange(n, dtype=np.int64).view("M8[ns]")
# We overwrite existing dt64 column with new, non-nano dt64 vals
df["dates"] = vals
assert (df["dates"].values == ex_vals).all()
def test_setitem_dt64tz(self, timezone_frame):
df = timezone_frame
idx = df["B"].rename("foo")
# setitem
df["C"] = idx
tm.assert_series_equal(df["C"], Series(idx, name="C"))
df["D"] = "foo"
df["D"] = idx
tm.assert_series_equal(df["D"], Series(idx, name="D"))
del df["D"]
# assert that A & C are not sharing the same base (e.g. they
# are copies)
v1 = df._mgr.arrays[1]
v2 = df._mgr.arrays[2]
tm.assert_extension_array_equal(v1, v2)
v1base = v1._data.base
v2base = v2._data.base
assert v1base is None or (id(v1base) != id(v2base))
# with nan
df2 = df.copy()
df2.iloc[1, 1] = NaT
df2.iloc[1, 2] = NaT
result = df2["B"]
tm.assert_series_equal(notna(result), Series([True, False, True], name="B"))
tm.assert_series_equal(df2.dtypes, df.dtypes)
def test_setitem_periodindex(self):
rng = period_range("1/1/2000", periods=5, name="index")
df = DataFrame(np.random.randn(5, 3), index=rng)
df["Index"] = rng
rs = Index(df["Index"])
tm.assert_index_equal(rs, rng, check_names=False)
assert rs.name == "Index"
assert rng.name == "index"
rs = df.reset_index().set_index("index")
assert isinstance(rs.index, PeriodIndex)
tm.assert_index_equal(rs.index, rng)
def test_setitem_complete_column_with_array(self):
# GH#37954
df = DataFrame({"a": ["one", "two", "three"], "b": [1, 2, 3]})
arr = np.array([[1, 1], [3, 1], [5, 1]])
df[["c", "d"]] = arr
expected = DataFrame(
{
"a": ["one", "two", "three"],
"b": [1, 2, 3],
"c": [1, 3, 5],
"d": [1, 1, 1],
}
)
expected["c"] = expected["c"].astype(arr.dtype)
expected["d"] = expected["d"].astype(arr.dtype)
assert expected["c"].dtype == arr.dtype
assert expected["d"].dtype == arr.dtype
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("dtype", ["f8", "i8", "u8"])
def test_setitem_bool_with_numeric_index(self, dtype):
# GH#36319
cols = Index([1, 2, 3], dtype=dtype)
df = DataFrame(np.random.randn(3, 3), columns=cols)
df[False] = ["a", "b", "c"]
expected_cols = Index([1, 2, 3, False], dtype=object)
if dtype == "f8":
expected_cols = Index([1.0, 2.0, 3.0, False], dtype=object)
tm.assert_index_equal(df.columns, expected_cols)
@pytest.mark.parametrize("indexer", ["B", ["B"]])
def test_setitem_frame_length_0_str_key(self, indexer):
# GH#38831
df = DataFrame(columns=["A", "B"])
other = DataFrame({"B": [1, 2]})
df[indexer] = other
expected = DataFrame({"A": [np.nan] * 2, "B": [1, 2]})
expected["A"] = expected["A"].astype("object")
tm.assert_frame_equal(df, expected)
def test_setitem_frame_duplicate_columns(self, using_array_manager):
# GH#15695
cols = ["A", "B", "C"] * 2
df = DataFrame(index=range(3), columns=cols)
df.loc[0, "A"] = (0, 3)
df.loc[:, "B"] = (1, 4)
df["C"] = (2, 5)
expected = DataFrame(
[
[0, 1, 2, 3, 4, 5],
[np.nan, 1, 2, np.nan, 4, 5],
[np.nan, 1, 2, np.nan, 4, 5],
],
dtype="object",
)
if using_array_manager:
# setitem replaces column so changes dtype
expected.columns = cols
expected["C"] = expected["C"].astype("int64")
# TODO(ArrayManager) .loc still overwrites
expected["B"] = expected["B"].astype("int64")
else:
# set these with unique columns to be extra-unambiguous
expected[2] = expected[2].astype(np.int64)
expected[5] = expected[5].astype(np.int64)
expected.columns = cols
tm.assert_frame_equal(df, expected)
def test_setitem_frame_duplicate_columns_size_mismatch(self):
# GH#39510
cols = ["A", "B", "C"] * 2
df = DataFrame(index=range(3), columns=cols)
with pytest.raises(ValueError, match="Columns must be same length as key"):
df[["A"]] = (0, 3, 5)
df2 = df.iloc[:, :3] # unique columns
with pytest.raises(ValueError, match="Columns must be same length as key"):
df2[["A"]] = (0, 3, 5)
@pytest.mark.parametrize("cols", [["a", "b", "c"], ["a", "a", "a"]])
def test_setitem_df_wrong_column_number(self, cols):
# GH#38604
df = DataFrame([[1, 2, 3]], columns=cols)
rhs = DataFrame([[10, 11]], columns=["d", "e"])
msg = "Columns must be same length as key"
with pytest.raises(ValueError, match=msg):
df["a"] = rhs
def test_setitem_listlike_indexer_duplicate_columns(self):
# GH#38604
df = DataFrame([[1, 2, 3]], columns=["a", "b", "b"])
rhs = DataFrame([[10, 11, 12]], columns=["a", "b", "b"])
df[["a", "b"]] = rhs
expected = DataFrame([[10, 11, 12]], columns=["a", "b", "b"])
tm.assert_frame_equal(df, expected)
df[["c", "b"]] = rhs
expected = DataFrame([[10, 11, 12, 10]], columns=["a", "b", "b", "c"])
tm.assert_frame_equal(df, expected)
def test_setitem_listlike_indexer_duplicate_columns_not_equal_length(self):
# GH#39403
df = DataFrame([[1, 2, 3]], columns=["a", "b", "b"])
rhs = DataFrame([[10, 11]], columns=["a", "b"])
msg = "Columns must be same length as key"
with pytest.raises(ValueError, match=msg):
df[["a", "b"]] = rhs
def test_setitem_intervals(self):
df = DataFrame({"A": range(10)})
ser = cut(df["A"], 5)
assert isinstance(ser.cat.categories, IntervalIndex)
# B & D end up as Categoricals
# the remainder are converted to in-line objects
# containing an IntervalIndex.values
df["B"] = ser
df["C"] = np.array(ser)
df["D"] = ser.values
df["E"] = np.array(ser.values)
df["F"] = ser.astype(object)
assert is_categorical_dtype(df["B"].dtype)
assert is_interval_dtype(df["B"].cat.categories)
assert is_categorical_dtype(df["D"].dtype)
assert is_interval_dtype(df["D"].cat.categories)
# These go through the Series constructor and so get inferred back
# to IntervalDtype
assert is_interval_dtype(df["C"])
assert is_interval_dtype(df["E"])
# But the Series constructor doesn't do inference on Series objects,
# so setting df["F"] doesn't get cast back to IntervalDtype
assert is_object_dtype(df["F"])
# they compare equal as Index
# when converted to numpy objects
c = lambda x: Index(np.array(x))
tm.assert_index_equal(c(df.B), c(df.B))
tm.assert_index_equal(c(df.B), c(df.C), check_names=False)
tm.assert_index_equal(c(df.B), c(df.D), check_names=False)
tm.assert_index_equal(c(df.C), c(df.D), check_names=False)
# B & D are the same Series
tm.assert_series_equal(df["B"], df["B"])
tm.assert_series_equal(df["B"], df["D"], check_names=False)
# C & E are the same Series
tm.assert_series_equal(df["C"], df["C"])
tm.assert_series_equal(df["C"], df["E"], check_names=False)
def test_setitem_categorical(self):
# GH#35369
df = DataFrame({"h": Series(list("mn")).astype("category")})
df.h = df.h.cat.reorder_categories(["n", "m"])
expected = DataFrame(
{"h": Categorical(["m", "n"]).reorder_categories(["n", "m"])}
)
tm.assert_frame_equal(df, expected)
def test_setitem_with_empty_listlike(self):
# GH#17101
index = Index([], name="idx")
result = DataFrame(columns=["A"], index=index)
result["A"] = []
expected = DataFrame(columns=["A"], index=index)
tm.assert_index_equal(result.index, expected.index)
@pytest.mark.parametrize(
"cols, values, expected",
[
(["C", "D", "D", "a"], [1, 2, 3, 4], 4), # with duplicates
(["D", "C", "D", "a"], [1, 2, 3, 4], 4), # mixed order
(["C", "B", "B", "a"], [1, 2, 3, 4], 4), # other duplicate cols
(["C", "B", "a"], [1, 2, 3], 3), # no duplicates
(["B", "C", "a"], [3, 2, 1], 1), # alphabetical order
(["C", "a", "B"], [3, 2, 1], 2), # in the middle
],
)
def test_setitem_same_column(self, cols, values, expected):
# GH#23239
df = DataFrame([values], columns=cols)
df["a"] = df["a"]
result = df["a"].values[0]
assert result == expected
def test_setitem_multi_index(self):
# GH#7655, test that assigning to a sub-frame of a frame
# with multi-index columns aligns both rows and columns
it = ["jim", "joe", "jolie"], ["first", "last"], ["left", "center", "right"]
cols = MultiIndex.from_product(it)
index = date_range("20141006", periods=20)
vals = np.random.randint(1, 1000, (len(index), len(cols)))
df = DataFrame(vals, columns=cols, index=index)
i, j = df.index.values.copy(), it[-1][:]
np.random.shuffle(i)
df["jim"] = df["jolie"].loc[i, ::-1]
tm.assert_frame_equal(df["jim"], df["jolie"])
np.random.shuffle(j)
df[("joe", "first")] = df[("jolie", "last")].loc[i, j]
tm.assert_frame_equal(df[("joe", "first")], df[("jolie", "last")])
np.random.shuffle(j)
df[("joe", "last")] = df[("jolie", "first")].loc[i, j]
tm.assert_frame_equal(df[("joe", "last")], df[("jolie", "first")])
@pytest.mark.parametrize(
"columns,box,expected",
[
(
["A", "B", "C", "D"],
7,
DataFrame(
[[7, 7, 7, 7], [7, 7, 7, 7], [7, 7, 7, 7]],
columns=["A", "B", "C", "D"],
),
),
(
["C", "D"],
[7, 8],
DataFrame(
[[1, 2, 7, 8], [3, 4, 7, 8], [5, 6, 7, 8]],
columns=["A", "B", "C", "D"],
),
),
(
["A", "B", "C"],
np.array([7, 8, 9], dtype=np.int64),
DataFrame([[7, 8, 9], [7, 8, 9], [7, 8, 9]], columns=["A", "B", "C"]),
),
(
["B", "C", "D"],
[[7, 8, 9], [10, 11, 12], [13, 14, 15]],
DataFrame(
[[1, 7, 8, 9], [3, 10, 11, 12], [5, 13, 14, 15]],
columns=["A", "B", "C", "D"],
),
),
(
["C", "A", "D"],
np.array([[7, 8, 9], [10, 11, 12], [13, 14, 15]], dtype=np.int64),
DataFrame(
[[8, 2, 7, 9], [11, 4, 10, 12], [14, 6, 13, 15]],
columns=["A", "B", "C", "D"],
),
),
(
["A", "C"],
DataFrame([[7, 8], [9, 10], [11, 12]], columns=["A", "C"]),
DataFrame(
[[7, 2, 8], [9, 4, 10], [11, 6, 12]], columns=["A", "B", "C"]
),
),
],
)
def test_setitem_list_missing_columns(self, columns, box, expected):
# GH#29334
df = DataFrame([[1, 2], [3, 4], [5, 6]], columns=["A", "B"])
df[columns] = box
tm.assert_frame_equal(df, expected)
def test_setitem_list_of_tuples(self, float_frame):
tuples = list(zip(float_frame["A"], float_frame["B"]))
float_frame["tuples"] = tuples
result = float_frame["tuples"]
expected = Series(tuples, index=float_frame.index, name="tuples")
tm.assert_series_equal(result, expected)
def test_setitem_iloc_generator(self):
# GH#39614
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
indexer = (x for x in [1, 2])
df.iloc[indexer] = 1
expected = DataFrame({"a": [1, 1, 1], "b": [4, 1, 1]})
tm.assert_frame_equal(df, expected)
def test_setitem_iloc_two_dimensional_generator(self):
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluate separated audio from a DCASE 2020 task 4 separation model."""
import os
import numpy as np
import pandas as pd
import tensorflow.compat.v1 as tf
import inference
from train import data_io
from train import metrics
from train import permutation_invariant
def _weights_for_nonzero_refs(source_waveforms):
"""Return shape (source,) weights for signals that are nonzero."""
source_norms = tf.sqrt(tf.reduce_mean(tf.square(source_waveforms), axis=-1))
return tf.greater(source_norms, 1e-8)
def _weights_for_active_seps(power_sources, power_separated):
"""Return (source,) weights for active separated signals."""
min_power = tf.reduce_min(power_sources, axis=-1, keepdims=True)
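# A factor of 0.01 puts the threshold 20 dB below the quietest reference source power.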
return tf.greater(power_separated, 0.01 * min_power)
def compute_metrics(source_waveforms, separated_waveforms, mixture_waveform):
"""Permutation-invariant SI-SNR, powers, and under/equal/over-separation."""
# Align separated sources to reference sources.
perm_inv_loss = permutation_invariant.wrap(
lambda tar, est: -metrics.signal_to_noise_ratio_gain_invariant(est, tar))
_, separated_waveforms = perm_inv_loss(source_waveforms[tf.newaxis],
separated_waveforms[tf.newaxis])
separated_waveforms = separated_waveforms[0] # Remove batch axis.
# Compute separated and source powers.
power_separated = tf.reduce_mean(separated_waveforms ** 2, axis=-1)
power_sources = tf.reduce_mean(source_waveforms ** 2, axis=-1)
# Compute weights for active (separated, source) pairs where source is nonzero
# and separated power is above threshold of quietest source power - 20 dB.
weights_active_refs = _weights_for_nonzero_refs(source_waveforms)
weights_active_seps = _weights_for_active_seps(
tf.boolean_mask(power_sources, weights_active_refs), power_separated)
weights_active_pairs = tf.logical_and(weights_active_refs,
weights_active_seps)
# Compute SI-SNR.
sisnr_separated = metrics.signal_to_noise_ratio_gain_invariant(
separated_waveforms, source_waveforms)
num_active_refs = tf.reduce_sum(tf.cast(weights_active_refs, tf.int32))
num_active_seps = tf.reduce_sum(tf.cast(weights_active_seps, tf.int32))
num_active_pairs = tf.reduce_sum(tf.cast(weights_active_pairs, tf.int32))
sisnr_mixture = metrics.signal_to_noise_ratio_gain_invariant(
tf.tile(mixture_waveform[tf.newaxis], (source_waveforms.shape[0], 1)),
source_waveforms)
# Compute under/equal/over separation.
under_separation = tf.cast(tf.less(num_active_seps, num_active_refs),
tf.float32)
equal_separation = tf.cast(tf.equal(num_active_seps, num_active_refs),
tf.float32)
over_separation = tf.cast(tf.greater(num_active_seps, num_active_refs),
tf.float32)
return {'sisnr_separated': sisnr_separated,
'sisnr_mixture': sisnr_mixture,
'sisnr_improvement': sisnr_separated - sisnr_mixture,
'power_separated': power_separated,
'power_sources': power_sources,
'under_separation': under_separation,
'equal_separation': equal_separation,
'over_separation': over_separation,
'weights_active_refs': weights_active_refs,
'weights_active_seps': weights_active_seps,
'weights_active_pairs': weights_active_pairs,
'num_active_refs': num_active_refs,
'num_active_seps': num_active_seps,
'num_active_pairs': num_active_pairs}
def _report_score_stats(metric_per_source_count, label='', counts=None):
"""Report mean and std dev for specified counts."""
values_all = []
if counts is None:
counts = metric_per_source_count.keys()
for count in counts:
values = metric_per_source_count[count]
values_all.extend(list(values))
return '%s for count(s) %s = %.1f +/- %.1f dB' % (
label, counts, np.mean(values_all), np.std(values_all))
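# Illustrative call (uses the per-source-count dicts built in evaluate() below):
# print(_report_score_stats(sisnri_per_source_count, label='SI-SNRi', counts=[2, 3, 4]))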
def evaluate(checkpoint_path, metagraph_path, data_list_path, output_path):
"""Evaluate a model on FUSS data."""
model = inference.SeparationModel(checkpoint_path, metagraph_path)
file_list = data_io.read_lines_from_file(data_list_path, skip_fields=1)
with model.graph.as_default():
dataset = data_io.wavs_to_dataset(file_list, batch_size=1,
num_samples=160000,
repeat=False)
# Strip batch and mic dimensions.
dataset['receiver_audio'] = dataset['receiver_audio'][0, 0]
dataset['source_images'] = dataset['source_images'][0, :, 0]
# Separate with a trained model.
i = 1
max_count = 4
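# FUSS mixtures contain 1 to 4 sources, hence max_count = 4.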
dict_per_source_count = lambda: {c: [] for c in range(1, max_count + 1)}
sisnr_per_source_count = dict_per_source_count()
sisnri_per_source_count = dict_per_source_count()
under_seps = []
equal_seps = []
over_seps = []
df = None
while True:
try:
waveforms = model.sess.run(dataset)
except tf.errors.OutOfRangeError:
break
separated_waveforms = model.separate(waveforms['receiver_audio'])
source_waveforms = waveforms['source_images']
if np.allclose(source_waveforms, 0):
print('WARNING: all-zeros source_waveforms tensor encountered. '
'Skipping this example...')
continue
metrics_dict = compute_metrics(source_waveforms, separated_waveforms,
waveforms['receiver_audio'])
metrics_dict = {k: v.numpy() for k, v in metrics_dict.items()}
sisnr_sep = metrics_dict['sisnr_separated']
sisnr_mix = metrics_dict['sisnr_mixture']
sisnr_imp = metrics_dict['sisnr_improvement']
weights_active_pairs = metrics_dict['weights_active_pairs']
# Create and initialize the dataframe if it doesn't exist.
if df is None:
# Need to create the dataframe.
columns = []
for metric_name, metric_value in metrics_dict.items():
if metric_value.shape:
# Per-source metric.
for i_src in range(1, max_count + 1):
columns.append(metric_name + '_source%d' % i_src)
else:
# Scalar metric.
columns.append(metric_name)
columns.sort()
df = pd.DataFrame(columns=columns)
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
import altair as alt
import vega_datasets
import pandas as pd
import numpy as np
#import dash_canvas
import dash_bootstrap_components as dbc
app = dash.Dash(__name__, assets_folder='assets',external_stylesheets=[dbc.themes.CERULEAN])
app.config['suppress_callback_exceptions'] = True
server = app.server
app.title = 'Dash app for DSCI 532 group - 103'
def chart1():
def mds_special():
font = "Arial"
axisColor = "#000000"
gridColor = "#DEDDDD"
return {
"config": {
"title": {
"fontSize": 18,
"font": font,
"anchor": "start", # equivalent of left-aligned.
"fontColor": "#000000"
},
'view': {
"height": 300,
"width": 400
},
"axisX": {
"domain": True,
#"domainColor": axisColor,
"gridColor": gridColor,
"domainWidth": 1,
"grid": False,
"labelFont": font,
"labelFontSize": 12,
"labelAngle": 0,
"tickColor": axisColor,
"tickSize": 5, # default, including it just to show you can change it
"titleFont": font,
"titleFontSize": 16,
"titlePadding": 10, # guessing, not specified in styleguide
"title": "X Axis Title (units)",
},
"axisY": {
"domain": False,
"grid": True,
"gridColor": gridColor,
"gridWidth": 1,
"labelFont": font,
"labelFontSize": 12,
"labelAngle": 0,
#"ticks": False, # even if you don't have a "domain" you need to turn these off.
"titleFont": font,
"titleFontSize": 16,
"titlePadding": 10, # guessing, not specified in styleguide
"title": "Y Axis Title (units)",
# titles are by default vertical left of axis so we need to hack this
#"titleAngle": 0, # horizontal
#"titleY": -10, # move it up
#"titleX": 18, # move it to the right so it aligns with the labels
},
}
}
# register the custom theme under a chosen name
alt.themes.register('mds_special', mds_special)
# enable the newly registered theme
alt.themes.enable('mds_special')
from vega_datasets import data
states = alt.topo_feature(data.us_10m.url, 'states')
hate_crime = pd.read_csv('../data/crime_state_id_clean.csv')
p1 =alt.Chart(states).mark_geoshape().encode(
alt.Color('avg_hatecrimes_per_100k_fbi:Q',title="Average hate crime per 100K"),
tooltip = [
alt.Tooltip('avg_hatecrimes_per_100k_fbi:Q', title = 'Average hate crime per 100K'),
alt.Tooltip('state:N')
]
).transform_lookup(
lookup='id',
from_=alt.LookupData(hate_crime, 'id', ['avg_hatecrimes_per_100k_fbi','state'])
).project('albersUsa').properties(
title='Average hate crimes per 100K population in each state',
width=550,
height=300)
return p1
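# Illustrative only (assumption about how the chart is wired into the layout): an Altair
# chart such as chart1() is typically embedded in a Dash layout via an Iframe, e.g.
# html.Iframe(sandbox='allow-scripts', srcDoc=chart1().to_html(), height='450', width='650')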
def chart2(x_val = 'gini_index'):
df = pd.read_csv('../data/hate_crimes.csv')
import argparse
import sys
import os
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
import glob
from sklearn import metrics
from scipy.stats import pearsonr, spearmanr
from scipy.optimize import curve_fit
from collections import Counter
import pickle
import pdb
parser = argparse.ArgumentParser(description = '''Visualize and analyze the DockQ scores.''')
#Bench4
parser.add_argument('--bench4_dockq_aa', nargs=1, type= str, default=sys.stdin, help = 'Path to dockq scores for benchmark 4 AF in csv.')
parser.add_argument('--bench4_dockq_RF', nargs=1, type= str, default=sys.stdin, help = 'Path to dockq scores for benchmark 4 from RF in csv.')
parser.add_argument('--plDDT_bench4', nargs=1, type= str, default=sys.stdin, help = 'Path to plDDT metrics in csv.')
#parser.add_argument('--pconsdock_bench4', nargs=1, type= str, default=sys.stdin, help = 'Path to pconsdock metrics in csv.')
#parser.add_argument('--pconsdock_marks', nargs=1, type= str, default=sys.stdin, help = 'Path to pconsdock metrics in csv.')
parser.add_argument('--bench4_kingdom', nargs=1, type= str, default=sys.stdin, help = 'Path to kingdoms for bench4 in csv.')
parser.add_argument('--dssp_bench4', nargs=1, type= str, default=sys.stdin, help = 'Path to dssp annotations for bench4 in csv.')
parser.add_argument('--afdefault_neff_bench4', nargs=1, type= str, default=sys.stdin, help = 'Path to default AF alignments Neff in csv.')
parser.add_argument('--tophits_neff_bench4', nargs=1, type= str, default=sys.stdin, help = 'Path to top hits alignments Neff in csv.')
#Marks positivef
parser.add_argument('--marks_dockq_RF', nargs=1, type= str, default=sys.stdin, help = 'Path to dockq scores for marks set RF in csv.')
parser.add_argument('--marks_dockq_AF_bb', nargs=1, type= str, default=sys.stdin, help = 'Path to dockq scores for marks set AF back bone atoms in csv.')
parser.add_argument('--marks_dockq_AF_aa', nargs=1, type= str, default=sys.stdin, help = 'Path to dockq scores for marks set AF all atoms in csv.')
parser.add_argument('--marks_dockq_GRAMM', nargs=1, type= str, default=sys.stdin, help = 'Path to dockq scores for marks set GRAMM in csv.')
parser.add_argument('--marks_dockq_TMfull', nargs=1, type= str, default=sys.stdin, help = 'Path to dockq scores for marks set TMdock in csv.')
parser.add_argument('--marks_dockq_TMint', nargs=1, type= str, default=sys.stdin, help = 'Path to dockq scores for marks set interface TMdock in csv.')
parser.add_argument('--marks_dockq_mdockpp', nargs=1, type= str, default=sys.stdin, help = 'Path to dockq scores for marks set MdockPP in csv.')
parser.add_argument('--plDDT_marks_af', nargs=1, type= str, default=sys.stdin, help = 'Path to plDDT metrics in csv.')
parser.add_argument('--plDDT_marks_fused', nargs=1, type= str, default=sys.stdin, help = 'Path to plDDT metrics in csv.')
parser.add_argument('--dssp_marks', nargs=1, type= str, default=sys.stdin, help = 'Path to dssp metrics in csv.')
parser.add_argument('--ifstats_marks', nargs=1, type= str, default=sys.stdin, help = 'Path to if metrics in csv.')
parser.add_argument('--aln_scores_marks', nargs=1, type= str, default=sys.stdin, help = 'Path to aln scores in csv.')
parser.add_argument('--oxstats_marks', nargs=1, type= str, default=sys.stdin, help = 'Path to statistics over organisms in csv.')
parser.add_argument('--afdefault_neff_marks', nargs=1, type= str, default=sys.stdin, help = 'Path to default AF alignments Neff in csv.')
parser.add_argument('--tophits_neff_marks', nargs=1, type= str, default=sys.stdin, help = 'Path to top hits alignments Neff in csv.')
parser.add_argument('--af_chain_overlap_marks', nargs=1, type= str, default=sys.stdin, help = 'Path to chain overlap for AF a3m in csv.')
#Marks negative
parser.add_argument('--plDDT_marks_negative_af', nargs=1, type= str, default=sys.stdin, help = 'Path to plDDT metrics in csv.')
#Negatome
parser.add_argument('--plDDT_negatome_af', nargs=1, type= str, default=sys.stdin, help = 'Path to plDDT metrics in csv.')
#New set
parser.add_argument('--newset_dockq_AF', nargs=1, type= str, default=sys.stdin, help = 'Path to dockq scores for new set AF in csv.')
parser.add_argument('--plDDT_newset', nargs=1, type= str, default=sys.stdin, help = 'Path to plDDT metrics in csv for newset.')
#Output directory
parser.add_argument('--outdir', nargs=1, type= str, default=sys.stdin, help = 'Path to output directory. Include / at the end')
################FUNCTIONS#################
def dockq_box(bench4_dockq, outdir):
'''Plot a boxplot of the dockq score for the different modes
'''
#Plot
fig,ax = plt.subplots(figsize=(24/2.54,12/2.54))
modes = bench4_dockq.columns[1:]
all_modes = []
all_scores = []
all_msas = []
all_model_options = []
accuracies = {}
for mode in modes:
#Frac correct and avg score
fraq_correct = np.argwhere(bench4_dockq[mode].values>=0.23).shape[0]/len(bench4_dockq)
accuracies[mode]=fraq_correct
av = np.average(bench4_dockq[mode].values)
print(mode, np.round(fraq_correct,3),np.round(av,3))
#Save scores
all_scores.extend([*bench4_dockq[mode].values])
mode = '_'.join(mode.split('_')[4:])
mode = mode.split('_')
msa = mode[0]
model = '_'.join(mode[1:-1])
option = mode[-1]
#save
all_modes.extend([msa+'\n'+model+'\n'+option]*len(bench4_dockq))
all_msas.extend([msa]*len(bench4_dockq))
all_model_options.extend([model+' '+option]*len(bench4_dockq))
def correlate_scores(bench4_dockq, outdir):
'''Correlate the scores for all different modeling strategies
'''
modes = ['DockQ_dockqstats_bench4_af2_hhblits_model_1_ens8',
'DockQ_dockqstats_bench4_af2_hhblits_model_1_rec10',
'DockQ_dockqstats_bench4_af2_af2stdmsa_model_1_ens8',
'DockQ_dockqstats_bench4_af2_af2stdmsa_model_1_rec10',
'DockQ_dockqstats_bench4_af2_af2andhhblitsmsa_model_1_ens8',
'DockQ_dockqstats_bench4_af2_af2andhhblitsmsa_model_1_rec10',
'DockQ_dockqstats_bench4_af2_hhblits_model_1_ptm_ens8',
'DockQ_dockqstats_bench4_af2_hhblits_model_1_ptm_rec10',
'DockQ_dockqstats_bench4_af2_af2stdmsa_model_1_ptm_ens8',
'DockQ_dockqstats_bench4_af2_af2stdmsa_model_1_ptm_rec10',
'DockQ_dockqstats_bench4_af2_af2andhhblitsmsa_model_1_ptm_ens8',
'DockQ_dockqstats_bench4_af2_af2andhhblitsmsa_model_1_ptm_rec10']
corr_matrix = np.zeros((len(modes),len(modes)))
for i in range(len(modes)):
scores_i = bench4_dockq[modes[i]].values
for j in range(i+1,len(modes)):
scores_j = bench4_dockq[modes[j]].values
#Correlate
R,p = pearsonr(scores_i,scores_j)
corr_matrix[i,j]=np.round(R,2)
corr_matrix[j,i]=np.round(R,2)
print(modes)
print(corr_matrix)
#Create df
corr_df = pd.DataFrame()
modes = ['_'.join(x.split('_')[4:]) for x in modes]
corr_df['Comparison'] = modes
for i in range(len(modes)):
corr_df[modes[i]]=corr_matrix[i,:]
corr_df.to_csv(outdir+'model_correlations.csv')
def fetch_missing_dockq(marks_dockq_AF_bb,marks_dockq_AF_aa):
'''Fetch missing DockQ scores
'''
ids = ['_'.join(x.split('-')) for x in marks_dockq_AF_aa.complex_id.values]
#Get mising scores
missing = marks_dockq_AF_bb[~marks_dockq_AF_bb.complex_id.isin(ids)]
ids = [x[:6]+'-'+x[7:] for x in missing.complex_id.values]
missing['complex_id']=ids
marks_dockq_AF_aa = pd.concat([marks_dockq_AF_aa,missing[marks_dockq_AF_aa.columns]])
return marks_dockq_AF_aa
def pdockq(if_plddt_contacts, dockq_scores, outdir):
#pdockq
fig,ax = plt.subplots(figsize=(12/2.54,12/2.54))
#Create RA
x_ra = []
y_ra = []
y_std = []
y_av_err = []
step = 20
for t in np.arange(0,max(if_plddt_contacts)-step,step):
inds = np.argwhere((if_plddt_contacts>=t)&(if_plddt_contacts<t+step))[:,0]
x_ra.append(t+step/2)
y_ra.append(np.average(dockq_scores[inds]))
y_std.append(np.std(dockq_scores[inds]))
y_av_err.append(np.average(np.absolute(dockq_scores[inds]-y_ra[-1])))
#Do a simple sigmoid fit
def sigmoid(x, L ,x0, k, b):
y = L / (1 + np.exp(-k*(x-x0)))+b
return (y)
xdata = if_plddt_contacts[np.argsort(if_plddt_contacts)]
ydata = dockq_scores[np.argsort(if_plddt_contacts)]
p0 = [max(ydata), np.median(xdata),1,min(ydata)] # this is a mandatory initial guess
popt, pcov = curve_fit(sigmoid, xdata, ydata,p0, method='dogbox')
y = sigmoid(xdata, *popt)
plt.plot(xdata,y,color='r',label='Sigmoidal fit')
#Calc error
print('Sigmoid params:',*popt)
plt.scatter(if_plddt_contacts,dockq_scores,s=1)
#plt.plot(x_ra,y_ra,label='Running average', color='tab:blue')
#plt.fill_between(x_ra,np.array(y_ra)-np.array(y_av_err),np.array(y_ra)+np.array(y_av_err),color='tab:blue',alpha=0.25, label='Average error')
plt.title('pDockQ')
plt.xlabel('IF plDDT⋅log(IF contacts)')
plt.ylabel('DockQ')
plt.legend()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.tight_layout()
plt.savefig(outdir+'pDockQ.svg',format='svg',dpi=300)
plt.close()
print('Average error for sigmoidal fit:',np.average(np.absolute(y-ydata)))
print('L=',np.round(popt[0],3),'x0=',np.round(popt[1],3) ,'k=',np.round(popt[2],3), 'b=',np.round(popt[3],3))
return popt
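def pdockq_from_features(if_plddt_av, num_if_contacts, popt):
    '''Illustrative helper (not part of the original pipeline): apply the sigmoid
    fitted in pdockq() to a single model's interface features to get its pDockQ.
    '''
    L, x0, k, b = popt
    x = if_plddt_av*np.log10(num_if_contacts+1)
    return L / (1 + np.exp(-k*(x-x0)))+b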
def ROC_pred_marks(marks_dockq_AF, plDDT_marks, outdir):
'''Compare the separation in the marks dataset for AF using metrics from the
predicted structures
'''
#Merge dfs
plDDT_marks['complex_id']=plDDT_marks.id1+'-'+plDDT_marks.id2
merged = pd.merge(marks_dockq_AF,plDDT_marks,on=['complex_id'],how='inner')
#Get min of chains
single_chain_plddt = np.min(merged[['ch1_plddt_av_1', 'ch2_plddt_av_1']].values,axis=1)
merged['min_chain_plddt_av_1'] = single_chain_plddt
#Analyze ROC as a function of
plDDT_metrics = ['if_plddt_av', 'min_chain_plddt_av',
'plddt_av', 'num_atoms_in_interface', 'num_res_in_interface']
plDDT_nice_names = {'if_plddt_av':'IF_plDDT', 'min_chain_plddt_av':'Min plDDT per chain',
'plddt_av':'Average plDDT', 'num_atoms_in_interface':'IF_contacts',
'num_res_in_interface':'IF_residues'}
run='1'
dockq_scores = merged['DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run'+run].values
correct = np.zeros(len(dockq_scores))
correct[np.argwhere(dockq_scores>=0.23)]=1
#Plot
fig,ax = plt.subplots(figsize=(12/2.54,12/2.54))
colors = {0:'darkblue',1:'magenta',2:'orange',3:'darkgreen',4:'tab:blue',5:'tab:olive',6:'tab:gray'}
for i in range(len(plDDT_metrics)):
plDDT_metric_vals = merged[plDDT_metrics[i]+'_'+run].values
#Create ROC
fpr, tpr, threshold = metrics.roc_curve(correct, plDDT_metric_vals, pos_label=1)
roc_auc = metrics.auc(fpr, tpr)
label = plDDT_metrics[i]
plt.plot(fpr, tpr, label = plDDT_nice_names[label]+': AUC = %0.2f' % roc_auc,color=colors[i])
#Add log(if contacts)*if_plddt_av
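#The +1 inside the log keeps log10 finite for models with zero interface contacts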
if_plddt_contacts = merged['if_plddt_av_1'].values*np.log10(merged['num_atoms_in_interface_1'].values+1)
#Create ROC
fpr, tpr, threshold = metrics.roc_curve(correct, if_plddt_contacts, pos_label=1)
roc_auc = metrics.auc(fpr, tpr)
plt.plot(fpr, tpr, label = 'IF_plDDT⋅log(IF_contacts)'+': AUC = %0.2f' % roc_auc,color='tab:cyan')
#Get pDockQ
def sigmoid(x, L ,x0, k, b):
y = L / (1 + np.exp(-k*(x-x0)))+b
return (y)
sigmoid_params = pdockq(if_plddt_contacts, dockq_scores, outdir)
#Create ROC
fpr, tpr, threshold = metrics.roc_curve(correct, sigmoid(if_plddt_contacts,*sigmoid_params), pos_label=1)
roc_auc = metrics.auc(fpr, tpr)
plt.plot(fpr, tpr, label = 'pDockQ'+': AUC = %0.2f' % roc_auc,color='k',linestyle='--')
plt.plot([0,1],[0,1],linewidth=1,linestyle='--',color='grey')
plt.legend(fontsize=9)
plt.title('ROC as a function of different metrics')
plt.xlabel('FPR')
plt.ylabel('TPR')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.tight_layout()
plt.savefig(outdir+'ROC_marks.svg',format='svg',dpi=300)
plt.close()
#pDockQ vs DockQ
fig,ax = plt.subplots(figsize=(12/2.54,12/2.54))
plt.scatter(sigmoid(if_plddt_contacts,*sigmoid_params),dockq_scores,s=1)
plt.title('pDockQ vs DockQ')
plt.xlabel('pDockQ')
plt.ylabel('DockQ')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.tight_layout()
plt.savefig(outdir+'pdockq_vs_dockq.svg',format='svg',dpi=300)
plt.close()
#plot if plddt vs log contacts and color by dockq
fig,ax = plt.subplots(figsize=(12/2.54,12/2.54))
plt.scatter(merged['num_atoms_in_interface_1'].values+1, merged['if_plddt_av_1'].values,c=dockq_scores,s=2)
cbar = plt.colorbar()
cbar.set_label('DockQ')
plt.xscale('log')
plt.ylim([40,100])
plt.title('Interface contacts, plDDT and DockQ')
plt.xlabel('Interface contacts')
plt.ylabel('Average interface plDDT')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.tight_layout()
plt.savefig(outdir+'if_conctacts_vs_plddt.svg',format='svg',dpi=300)
plt.close()
return sigmoid_params
def score_marks_5runs_paired_af(marks_dockq_AF, plDDT_marks, sigmoid_params, outdir):
'''Analyze the variation in DockQ using 5 identical runs of the same settings
'''
plDDT_marks['complex_id'] = plDDT_marks.id1+'-'+plDDT_marks.id2
merged = pd.merge(marks_dockq_AF,plDDT_marks,on='complex_id',how='inner')
#Get separator
separator1 = merged[['if_plddt_av_1', 'if_plddt_av_2','if_plddt_av_3','if_plddt_av_4','if_plddt_av_5']].values
separator2 = merged[['num_atoms_in_interface_1', 'num_atoms_in_interface_2','num_atoms_in_interface_3','num_atoms_in_interface_4','num_atoms_in_interface_5']].values
separator = separator1*np.log10(separator2+1)
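#pDockQ input feature (IF plDDT * log10(IF contacts + 1)) for each of the five runs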
def sigmoid(x, L ,x0, k, b):
y = L / (1 + np.exp(-k*(x-x0)))+b
return (y)
separator = sigmoid(separator, *sigmoid_params)
scores = merged[['DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run1','DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run2',
'DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run3','DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run4',
'DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run5']].values
#Get max and min scores
max_scores = np.max(scores,axis=1)
min_scores = np.min(scores,axis=1)
#Get success rates per initializations
srs = []
for i in range(scores.shape[1]):
srs.append(np.argwhere(scores[:,i]>=0.23).shape[0]/len(scores))
print('Test set scoring:')
print('Success rate 5 runs top scores',np.argwhere(max_scores>=0.23).shape[0]/len(max_scores))
print('Av diff',np.average(max_scores-min_scores))
print('Std diff',np.std(max_scores-min_scores))
print('Avg and std success rate', np.average(srs),np.std(srs))
#Separate the models using the number of contacts in the interface
max_inds = np.argmax(separator,axis=1)
first_ranked_scores = []
first_ranked_separators = []
#Get max separator scores
for i in range(len(max_inds)):
first_ranked_scores.append(scores[i,max_inds[i]])
first_ranked_separators.append(separator[i,max_inds[i]])
#Convert to array
first_ranked_scores = np.array(first_ranked_scores)
first_ranked_separators = np.array(first_ranked_separators)
#Get success rate
print('Ranking test set success rate using av plDDT*log(if_contacts) in interface',np.argwhere(first_ranked_scores>=0.23).shape[0]/len(first_ranked_scores))
#Get AUC using that success rate
correct = np.zeros(len(first_ranked_scores))
correct[np.argwhere(first_ranked_scores>=0.23)]=1
fpr, tpr, threshold = metrics.roc_curve(correct, first_ranked_separators, pos_label=1)
roc_auc = metrics.auc(fpr, tpr)
print('AUC using the same ranking', roc_auc)
#Plot
fig,ax = plt.subplots(figsize=(12/2.54,12/2.54))
plt.scatter(first_ranked_scores, max_scores,s=3,color='tab:blue',label='Max')
plt.scatter(first_ranked_scores, min_scores,s=3,color='mediumseagreen',label='Min')
plt.title('Model ranking on the test set')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.plot([0,1],[0,1],color='k',linewidth=1,linestyle='--')
plt.xlabel('DockQ first ranked model')
plt.ylabel('DockQ')
plt.legend()
plt.tight_layout()
plt.savefig(outdir+'DockQ_marks_5runs.svg',format='svg',dpi=300)
plt.close()
#Assign top ranked scores and origin
marks_dockq_AF['top_ranked_model_DockQ_af2']=first_ranked_scores
marks_dockq_AF['top_ranked_pDockQ']=first_ranked_separators
marks_dockq_AF['top_ranked_model_run_af2']=max_inds+1
#Create and save df
roc_df = pd.DataFrame()
roc_df['FPR']=fpr
roc_df['TPR']=tpr
roc_df['FPR*SR']=fpr*(np.argwhere(first_ranked_scores>=0.23).shape[0]/len(first_ranked_scores))
roc_df['TPR*SR']=tpr*np.argwhere(first_ranked_scores>=0.23).shape[0]/len(first_ranked_scores)
roc_df['PPV']=tpr/(tpr+fpr)
roc_df['pDockQ']=threshold
roc_df.to_csv(outdir+'roc_df_af2_marks.csv')
#Select a reduced version of the df
roc_df.loc[np.arange(0,len(roc_df),10)].to_csv(outdir+'roc_df_af2_marks_reduced.csv')
return marks_dockq_AF
def score_marks_5runs_paired_fused(marks_dockq_AF, plDDT_marks, sigmoid_params, outdir):
'''Analyze the variation in DockQ using 5 identical runs of the same settings
'''
plDDT_marks['complex_id'] = plDDT_marks.id1+'-'+plDDT_marks.id2
merged = pd.merge(marks_dockq_AF,plDDT_marks,on='complex_id',how='inner')
#Get separator
separator1 = merged[['if_plddt_av_1', 'if_plddt_av_2','if_plddt_av_3','if_plddt_av_4','if_plddt_av_5']].values
separator2 = merged[['num_atoms_in_interface_1', 'num_atoms_in_interface_2','num_atoms_in_interface_3','num_atoms_in_interface_4','num_atoms_in_interface_5']].values
separator = separator1*np.log10(separator2+1)
def sigmoid(x, L ,x0, k, b):
y = L / (1 + np.exp(-k*(x-x0)))+b
return (y)
separator = sigmoid(separator, *sigmoid_params)
scores = merged[['DockQ_dockqstats_marks_af2_pairedandfused_model_1_rec10_run1','DockQ_dockqstats_marks_af2_pairedandfused_model_1_rec10_run2',
'DockQ_dockqstats_marks_af2_pairedandfused_model_1_rec10_run3','DockQ_dockqstats_marks_af2_pairedandfused_model_1_rec10_run4',
'DockQ_dockqstats_marks_af2_pairedandfused_model_1_rec10_run5']].values
#Get max and min scores
max_scores = np.max(scores,axis=1)
min_scores = np.min(scores,axis=1)
#Get success rates per initializations
srs = []
for i in range(scores.shape[1]):
srs.append(np.argwhere(scores[:,i]>=0.23).shape[0]/len(scores))
print('FUSED test set scoring:')
print('Success rate 5 runs top scores',np.argwhere(max_scores>=0.23).shape[0]/len(max_scores))
print('Av diff',np.average(max_scores-min_scores))
print('Std diff',np.std(max_scores-min_scores))
print('Avg and std success rate', np.average(srs),np.std(srs))
#Separate the models using the number of contacts in the interface
max_inds = np.argmax(separator,axis=1)
first_ranked_scores = []
first_ranked_separators = []
#Get max separator scores
for i in range(len(max_inds)):
first_ranked_scores.append(scores[i,max_inds[i]])
first_ranked_separators.append(separator[i,max_inds[i]])
#Convert to array
first_ranked_scores = np.array(first_ranked_scores)
first_ranked_separators = np.array(first_ranked_separators)
#Get success rate
print('Ranking test set success rate using if_plddt_av and num contacts in interface',np.argwhere(first_ranked_scores>=0.23).shape[0]/len(first_ranked_scores))
#Get AUC using that success rate
correct = np.zeros(len(first_ranked_scores))
correct[np.argwhere(first_ranked_scores>=0.23)]=1
fpr, tpr, threshold = metrics.roc_curve(correct, first_ranked_separators, pos_label=1)
roc_auc = metrics.auc(fpr, tpr)
print('FUSED AUC using the same ranking', roc_auc)
#Assign top ranked scores and origin
marks_dockq_AF['top_ranked_model_DockQ_fused']=first_ranked_scores
marks_dockq_AF['top_ranked_model_run_fused']=max_inds+1
#Create and save df
roc_df = pd.DataFrame()
roc_df['FPR']=fpr
roc_df['TPR']=tpr
roc_df['FPR*SR']=fpr*(np.argwhere(first_ranked_scores>=0.23).shape[0]/len(first_ranked_scores))
roc_df['TPR*SR']=tpr*np.argwhere(first_ranked_scores>=0.23).shape[0]/len(first_ranked_scores)
roc_df['PPV']=tpr/(tpr+fpr)
roc_df['pDockQ']=threshold
roc_df.to_csv(outdir+'roc_df_fused_marks.csv')
#Select a reduced version of the df
roc_df.loc[np.arange(0,len(roc_df),10)].to_csv(outdir+'roc_df_fused_marks_reduced.csv')
return marks_dockq_AF
def marks_box(marks_dockq_AF, marks_dockq_GRAMM, marks_dockq_mdockpp, marks_dockq_TMfull, marks_dockq_TMint, marks_dockq_RF,outdir):
'''Box df of Marks set
'''
marks_dockq_TMint = marks_dockq_TMint.dropna()
marks_dockq_TMfull = marks_dockq_TMfull.dropna()
#Get data
rf_scores = marks_dockq_RF.DockQ_dockqstats_marks_RF.values
gramm_scores = marks_dockq_GRAMM[1].values
mdockpp_scores = marks_dockq_mdockpp.DockQ.values
TMfull_scores = marks_dockq_TMfull.dockq.values
TMint_scores = marks_dockq_TMint.dockq.values
paired_scores = marks_dockq_AF.DockQ_dockqstats_marks_af2_hhblitsn2_model_1_rec10.values
af2_std_scores = marks_dockq_AF.DockQ_dockqstats_marks_af2_af2stdmsa_model_1_rec10.values
run1_both_scores= marks_dockq_AF.DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run1.values
run1_fused_scores = marks_dockq_AF.DockQ_dockqstats_marks_af2_pairedandfused_model_1_rec10_run1.values
top_paired_af_scores = marks_dockq_AF.top_ranked_model_DockQ_af2.values
top_paired_fused_scores = marks_dockq_AF.top_ranked_model_DockQ_fused.values
data1 = [rf_scores, gramm_scores, mdockpp_scores, TMint_scores, af2_std_scores, paired_scores, top_paired_af_scores, top_paired_fused_scores]
data2 = [run1_both_scores, run1_fused_scores, top_paired_af_scores,top_paired_fused_scores]
all_data = [data1,data2]
xlabels1 = ['RF','GRAMM', 'MDockPP', 'TMdock\nInterfaces', 'AF2', 'Paired', 'AF2+Paired\ntop ranked','Block+Paired\ntop ranked']
xlabels2 = ['AF2+Paired', 'Block+Paired', 'AF2+Paired\ntop ranked', 'Block+Paired\ntop ranked']
all_xlabels = [xlabels1, xlabels2]
#Color
colors = sns.husl_palette(len(xlabels1)+2)
all_colors = [colors[:len(xlabels1)],colors[-len(xlabels2):]]
for i in range(len(all_data)):
#Boxplot
fig,ax = plt.subplots(figsize=(24/2.54,12/2.54))
data = all_data[i] #Get data and xlabel variation
xlabels = all_xlabels[i]
colors = all_colors[i]
#Success rates
srs = []
for j in range(len(data)):
sr = np.argwhere(data[j]>=0.23).shape[0]/len(data[j])
median = np.median(data[j])
print(xlabels[j],'sr:',np.round(sr,3),len(data[j]),median)
#xlabels[j]+='\nSR: '+str(np.round(100*sr,1))+'%'
#xlabels[j]+='\nM: '+str(np.round(median,3))
# Creating plot
#ax.violinplot(data)
bp = ax.boxplot(data, patch_artist = True, notch=True, showfliers=False)
for patch, color in zip(bp['boxes'], colors):
patch.set_facecolor(color)
patch.set_alpha(0.75)
# changing color and linewidth of
# medians
for median in bp['medians']:
median.set(color ='k',linewidth = 3)
# #Add swarm
# for i in range(len(data)):
# # Add some random "jitter" to the x-axis
# x = np.random.normal(i, 0.04, size=len(data[i]))
# plt.plot(x+1, data[i], 'r.', alpha=0.2)
# changing color and linewidth of
# whiskers
for whisker in bp['whiskers']:
whisker.set(color ='grey',
linewidth = 1)
# changing color and linewidth of
# caps
for cap in bp['caps']:
cap.set(color ='grey',
linewidth = 1)
plt.title('DockQ scores for the test set',fontsize=20)
plt.xticks(np.arange(1,len(xlabels)+1),xlabels,fontsize=12)
plt.ylabel('DockQ')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.tight_layout()
plt.savefig(outdir+'DockQ_box_test'+str(i)+'.svg',format='svg',dpi=300)
plt.close()
def AF_vs_RF_marks(marks_dockq_RF,marks_dockq_AF, outdir):
'''Compare the scores for RF vs AF
'''
merged = pd.merge(marks_dockq_RF,marks_dockq_AF,on='complex_id',how='inner')
print('Number of complexes in merged Marks RF and AF', len(merged))
#Plot
fig,ax = plt.subplots(figsize=(12/2.54,12/2.54))
plt.scatter(merged['DockQ_dockqstats_marks_RF'],merged['DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run1'],s=1)
plt.plot([0,1],[0,1],linewidth=1,linestyle='--',color='grey')
#Plot correct cutoff
plt.plot([0.23,0.23],[0,0.23],linewidth=1,linestyle='--',color='k')
plt.plot([0,0.23],[0.23,0.23],linewidth=1,linestyle='--',color='k',label='Success cutoff')
plt.title('RF vs AF2 performance on the test set')
plt.xlabel('RF DockQ')
plt.ylabel('AF DockQ')
plt.legend()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.tight_layout()
plt.savefig(outdir+'RF_vs_AF_test.svg',format='svg',dpi=300)
#Get num correct
num_correct_RF = np.argwhere(merged['DockQ_dockqstats_marks_RF'].values>=0.23).shape[0]
num_correct_AF = np.argwhere(merged['DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run1'].values>=0.23).shape[0]
num_total = len(merged)
print('Success rate RF:',num_correct_RF,'out of',num_total,'|',np.round(100*num_correct_RF/num_total,2),'%')
print('Success rate AF:',num_correct_AF,'out of',num_total,'|',np.round(100*num_correct_AF/num_total,2),'%')
#Get where RF outperforms AF
scores = merged[['DockQ_dockqstats_marks_RF','DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run1']].values
rf_pos = scores[np.argwhere(scores[:,0]>=0.23)[:,0],:]
max_scores = np.argmax(rf_pos,axis=1)
print('RF outperform AF', np.argwhere(max_scores==0).shape[0], 'out of',len(rf_pos),'times|',np.argwhere(max_scores==0).shape[0]/len(rf_pos))
def AF_vs_GRAMM_marks(marks_dockq_GRAMM, marks_dockq_AF, outdir):
'''Compare the scores for GRAMM vs AF
'''
marks_dockq_GRAMM = marks_dockq_GRAMM.rename(columns={1: 'DockQ GRAMM'})
marks_dockq_GRAMM['complex_id'] = ['_'.join(x.split('-')) for x in marks_dockq_GRAMM[0]]
merged = pd.merge(marks_dockq_GRAMM,marks_dockq_AF,on='complex_id',how='inner')
print('Number of complexes in merged Marks GRAMM and AF', len(merged))
#Plot
fig,ax = plt.subplots(figsize=(12/2.54,12/2.54))
plt.scatter(merged['DockQ GRAMM'],merged['DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run1'],s=1)
plt.plot([0,1],[0,1],linewidth=1,linestyle='--',color='grey')
#Plot correct cutoff
plt.plot([0.23,0.23],[0,0.23],linewidth=1,linestyle='--',color='k')
plt.plot([0,0.23],[0.23,0.23],linewidth=1,linestyle='--',color='k',label='Success cutoff')
plt.title('GRAMM vs AF2 performance on the test set')
plt.xlabel('GRAMM DockQ')
plt.ylabel('AF DockQ')
plt.legend()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.tight_layout()
plt.savefig(outdir+'GRAMM_vs_AF_test.svg',format='svg',dpi=300)
#Get num correct
num_correct_GRAMM = np.argwhere(merged['DockQ GRAMM'].values>=0.23).shape[0]
num_correct_AF = np.argwhere(merged['DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run1'].values>=0.23).shape[0]
num_total = len(merged)
print('Success rate GRAMM:',num_correct_GRAMM,'out of',num_total,'|',np.round(100*num_correct_GRAMM/num_total,2),'%')
print('Success rate AF:',num_correct_AF,'out of',num_total,'|',np.round(100*num_correct_AF/num_total,2),'%')
#Get where GRAMM outperforms AF
scores = merged[['DockQ GRAMM','DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run1']].values
GRAMM_pos = scores[np.argwhere(scores[:,0]>=0.23)[:,0],:]
max_scores = np.argmax(GRAMM_pos,axis=1)
print('GRAMM outperform AF', np.argwhere(max_scores==0).shape[0], 'out of',len(GRAMM_pos),'times|',np.argwhere(max_scores==0).shape[0]/len(GRAMM_pos))
def AF_vs_TMint_marks(marks_dockq_TMint, marks_dockq_AF, outdir):
'''Compare the scores for TMint vs AF
'''
marks_dockq_TMint = marks_dockq_TMint.rename(columns={'dockq': 'DockQ TMint'})
merged = pd.merge(marks_dockq_TMint,marks_dockq_AF,on='complex_id',how='inner')
print('Number of complexes in merged Marks TMint and AF', len(merged))
#Plot
fig,ax = plt.subplots(figsize=(12/2.54,12/2.54))
plt.scatter(merged['DockQ TMint'],merged['DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run1'],s=1)
plt.plot([0,1],[0,1],linewidth=1,linestyle='--',color='grey')
#Plot correct cutoff
plt.plot([0.23,0.23],[0,0.23],linewidth=1,linestyle='--',color='k')
plt.plot([0,0.23],[0.23,0.23],linewidth=1,linestyle='--',color='k',label='Success cutoff')
plt.title('TMint vs AF2 performance on the test set')
plt.xlabel('TMint DockQ')
plt.ylabel('AF DockQ')
plt.legend()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.tight_layout()
plt.savefig(outdir+'TMint_vs_AF_test.svg',format='svg',dpi=300)
#Get num correct
num_correct_TMint = np.argwhere(merged['DockQ TMint'].values>=0.23).shape[0]
num_correct_AF = np.argwhere(merged['DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run1'].values>=0.23).shape[0]
num_total = len(merged)
print('Success rate TMint:',num_correct_TMint,'out of',num_total,'|',np.round(100*num_correct_TMint/num_total,2),'%')
print('Success rate AF:',num_correct_AF,'out of',num_total,'|',np.round(100*num_correct_AF/num_total,2),'%')
#Get where TMint outperforms AF
scores = merged[['DockQ TMint','DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run1']].values
TMint_pos = scores[np.argwhere(scores[:,0]>=0.23)[:,0],:]
max_scores = np.argmax(TMint_pos,axis=1)
print('TMint outperform AF', np.argwhere(max_scores==0).shape[0], 'out of',len(TMint_pos),'times|',np.argwhere(max_scores==0).shape[0]/len(TMint_pos))
def real_features_marks(marks_dockq_AF, dssp_marks, ifstats_marks, aln_scores_marks, AFneffs_marks, topneffs_marks, outdir):
'''Compare the separation in the marks dataset for AF using metrics from the
real structures
'''
#Change DSSP df
dssp_marks['Helix']=dssp_marks.G+dssp_marks.H+dssp_marks.I
dssp_marks['Sheet']=dssp_marks.E+dssp_marks.B
dssp_marks['Loop']=dssp_marks[' '].values
ss = dssp_marks[['Helix','Sheet','Loop']].values #0,1,2
dssp_marks['ss_class']=np.argmax(dssp_marks[['Helix','Sheet','Loop']].values,axis=1)
dssp_marks = dssp_marks[['id1','id2','ss_class']]
#Merge dfs
dssp_marks['complex_id']=dssp_marks.id1+'-'+dssp_marks.id2
ifstats_marks['complex_id']=ifstats_marks.id1+'-'+ifstats_marks.id2
aln_scores_marks['complex_id']=aln_scores_marks.id1+'-'+aln_scores_marks.id2
aln_scores_marks = aln_scores_marks[['complex_id','aln_score']]
merged_dssp = pd.merge(marks_dockq_AF,dssp_marks,on=['complex_id'],how='inner')
merged_if = pd.merge(marks_dockq_AF,ifstats_marks,on=['complex_id'],how='inner')
merged_if = pd.merge(merged_if,aln_scores_marks,on=['complex_id'],how='inner')
#AFneffs_marks['complex_id']=[code.replace('-', '_') for code in AFneffs_marks['complex_id']]
#topneffs_marks['complex_id']=[code.replace('-', '_') for code in topneffs_marks['complex_id']]
merged_if = pd.merge(merged_if,AFneffs_marks,on=['complex_id'],how='inner')
merged_if = pd.merge(merged_if,topneffs_marks,on=['complex_id'],how='inner')
"""
{This script plots SMF and SMHM from results of the mcmc including best fit and
68th percentile of lowest chi-squared values. This is compared to data and is
done for all 3 surveys: ECO, RESOLVE-A and RESOLVE-B.}
"""
# Matplotlib backend
# import matplotlib
# matplotlib.use('Agg')
# Libs
from halotools.empirical_models import PrebuiltSubhaloModelFactory
from cosmo_utils.utils.stats_funcs import Stats_one_arr
from halotools.sim_manager import CachedHaloCatalog
from cosmo_utils.utils import work_paths as cwpaths
from matplotlib.legend_handler import HandlerTuple
from matplotlib.legend_handler import HandlerBase
from scipy.stats import binned_statistic as bs
from collections import OrderedDict
from multiprocessing import Pool
import matplotlib.pyplot as plt
from matplotlib import rc
import pandas as pd
import numpy as np
import argparse
import random
import math
import time
import os
__author__ = '{<NAME>}'
rc('font', **{'family': 'sans-serif', 'sans-serif': ['Helvetica']}, size=25)
rc('text', usetex=True)
rc('text.latex', preamble=r"\usepackage{amsmath}")
rc('axes', linewidth=2)
rc('xtick.major', width=4, size=7)
rc('ytick.major', width=4, size=7)
rc('xtick.minor', width=2, size=7)
rc('ytick.minor', width=2, size=7)
def read_chi2(path_to_file):
"""
Reads chi-squared values from file
Parameters
----------
path_to_file: string
Path to chi-squared values file
Returns
---------
chi2: array
Array of chi^2 values read from file, one per chain sample
"""
chi2_df = pd.read_csv(path_to_file,header=None,names=['chisquared'])
chi2 = chi2_df.chisquared.values
return chi2
def read_mcmc(path_to_file):
"""
Reads mcmc chain from file
Parameters
----------
path_to_file: string
Path to mcmc chain file
Returns
---------
emcee_table: pandas dataframe
Dataframe of mcmc chain values with NANs removed
"""
colnames = ['mstar_q','mh_q','mu','nu']
emcee_table = pd.read_csv(path_to_file, names=colnames,
delim_whitespace=True, header=None)
emcee_table = emcee_table[emcee_table.mstar_q.values != '#']
emcee_table.mstar_q = emcee_table.mstar_q.astype(np.float64)
emcee_table.mh_q = emcee_table.mh_q.astype(np.float64)
emcee_table.mu = emcee_table.mu.astype(np.float64)
emcee_table.nu = emcee_table.nu.astype(np.float64)
return emcee_table
def mock_add_grpcz(mock_df):
grpcz = mock_df.groupby('groupid').cz.mean().values
grpn = mock_df.groupby('groupid').cz.size().values
full_grpcz_arr = np.repeat(grpcz, grpn)
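# note: this positional repeat assumes mock_df rows are ordered by groupid,
# so each group's mean cz lines up with its member galaxies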
mock_df['grpcz'] = full_grpcz_arr
return mock_df
def read_mock_catl(filename, catl_format='.hdf5'):
"""
Function to read ECO/RESOLVE catalogues.
Parameters
----------
filename: string
path and name of the ECO/RESOLVE catalogue to read
catl_format: string, optional (default = '.hdf5')
type of file to read.
Options:
- '.hdf5': Reads in a catalogue in HDF5 format
Returns
-------
mock_pd: pandas DataFrame
DataFrame with galaxy/group information
Examples
--------
# Specifying `filename`
>>> filename = 'ECO_catl.hdf5'
# Reading in Catalogue
>>> mock_pd = reading_catls(filename, format='.hdf5')
>>> mock_pd.head()
x y z vx vy vz \
0 10.225435 24.778214 3.148386 356.112457 -318.894409 366.721832
1 20.945772 14.500367 -0.237940 168.731766 37.558834 447.436951
2 21.335835 14.808488 0.004653 967.204407 -701.556763 -388.055115
3 11.102760 21.782235 2.947002 611.646484 -179.032089 113.388794
4 13.217764 21.214905 2.113904 120.689598 -63.448833 400.766541
loghalom cs_flag haloid halo_ngal ... cz_nodist vel_tot \
0 12.170 1 196005 1 ... 2704.599189 602.490355
1 11.079 1 197110 1 ... 2552.681697 479.667489
2 11.339 1 197131 1 ... 2602.377466 1256.285409
3 11.529 1 199056 1 ... 2467.277182 647.318259
4 10.642 1 199118 1 ... 2513.381124 423.326770
vel_tan vel_pec ra_orig groupid M_group g_ngal g_galtype \
0 591.399858 -115.068833 215.025116 0 11.702527 1 1
1 453.617221 155.924074 182.144134 1 11.524787 4 0
2 1192.742240 394.485714 182.213220 1 11.524787 4 0
3 633.928896 130.977416 210.441320 2 11.502205 1 1
4 421.064495 43.706352 205.525386 3 10.899680 1 1
halo_rvir
0 0.184839
1 0.079997
2 0.097636
3 0.113011
4 0.057210
"""
## Checking if file exists
if not os.path.exists(filename):
msg = '`filename`: {0} NOT FOUND! Exiting..'.format(filename)
raise ValueError(msg)
## Reading file
if catl_format=='.hdf5':
mock_pd = pd.read_hdf(filename)
#!/usr/bin/env python3
import unittest
from unittest.mock import patch, call
import pandas as pd
from pandas.util.testing import assert_frame_equal
from word_search_puzzle.word_search_solver import WordSearchPuzzle
class WordSearchPuzzleTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.word_search_puzzle = r"puzzles/test_word_search_puzzle.txt"
cls.word_search_set = r"puzzles/test_word_search_set.txt"
# initiate the class for the whole test
cls.ws = WordSearchPuzzle(cls.word_search_puzzle, cls.word_search_set)
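# setUpClass runs once, so this single WordSearchPuzzle instance is shared by every test below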
def test_get_puzzle_size(self):
# should get an error when an invalid file path is given
with self.assertRaises(AssertionError):
self.ws._get_puzzle_size('/not/a/path.txt')
# Return a tuple
result = self.ws._get_puzzle_size(self.word_search_puzzle) # -> (14, 14)
self.assertTrue(isinstance(result, tuple))
width, height = result
self.assertEqual(width, 14)
self.assertEqual(height, 14)
def test_create_empty_dataframe(self):
# should get an error when an invalid file path is given
with self.assertRaises(AssertionError):
self.ws._create_empty_dataframe('/not/a/path.txt')
# create a DataFrame
result = self.ws._create_empty_dataframe(self.word_search_puzzle)
self.assertTrue(isinstance(result, pd.DataFrame))
# DataFrame should have the size of the puzzle
width, height = result.shape[:2]
self.assertEqual(width, 14)
self.assertEqual(height, 14)
for index, row in result.iterrows():
for cell in row:
# every cell should contain a space character
self.assertEqual(cell, chr(32), 'DataFrame cell should contain a "space" character')
def test_create_puzzle_dataframe(self):
# should get an error when an invalid file path is given
with self.assertRaises(AssertionError):
self.ws._get_puzzle_size('/not/a/path.txt')
# create a DataFrame
result = self.ws._create_puzzle_dataframe(self.word_search_puzzle)
self.assertTrue(isinstance(result, pd.DataFrame))
# DataFrame should have the size of the puzzle
width, height = result.shape[:2]
self.assertEqual(width, 14)
self.assertEqual(height, 14)
# comprehension list creates 2d-list of the puzzle file
with open(self.word_search_puzzle) as file:
puzzle_file = file.read()
line_2d_list = [[letter for letter in line] for line in puzzle_file.split('\n') if line]
for line_list, (index, row) in zip(line_2d_list, result.iterrows()):
# line and row should be the same
self.assertEqual(line_list, row.tolist())
for line_letter, row_letter in zip(line_list, row):
# checking if the letter on the line and the letter on the row is the same
self.assertEqual(line_letter, row_letter)
def test_create_position_dataframe(self):
# should get an error when an invalid object is given
with self.assertRaises(AssertionError):
self.ws._create_position_dataframe('puzzle_df')
# create a DataFrame
result = self.ws._create_position_dataframe(self.ws.puzzle_df)
self.assertTrue(isinstance(result, pd.DataFrame))
# DataFrame should have the size of the puzzle
width, height = result.shape[:2]
self.assertEqual(width, 14)
self.assertEqual(height, 14)
# comprehension list creates 2d-list of the puzzle file
with open(self.word_search_puzzle) as file:
puzzle_file = file.read()
pos_2d_list = [[(x, y) for x, _ in enumerate(line)] for y, line in enumerate(puzzle_file.split('\n')) if line]
for pos_list, (index, row) in zip(pos_2d_list, result.iterrows()):
# pos_list and row should be the same
self.assertEqual(pos_list, row.tolist())
for list_pos, row_pos in zip(pos_list, row):
# checking if the position on the row is a tuple
self.assertTrue(isinstance(row_pos, tuple))
# checking if the position on the row is indeed made up of an (x, y) formation
self.assertEqual(list_pos, row_pos)
def test_create_word_set(self):
# should get an error when an invalid file path is given
with self.assertRaises(AssertionError):
self.ws._create_word_set('/not/a/path.txt')
with open(self.word_search_set) as file:
length_file = len(file.readlines())
# create a set
result = self.ws._create_word_set(self.word_search_set)
self.assertTrue(isinstance(result, set))
# the parsed set should contain more words than the file has lines,
# since the parser splits lines that pack several entries together
self.assertGreater(len(result), length_file)
self.assertEqual(35, len(result))
def test_get_turned_DataFrame(self):
# should get an error when an invalid type is given to DataFrame
with self.assertRaises(AssertionError):
self.ws.get_turned_dataframe(dataframe='dataframe', times=2)
# should get an error when an invalid type is given to times
with self.assertRaises(AssertionError):
self.ws.get_turned_dataframe(dataframe=pd.DataFrame, times='2')
# get data to test
# DataFrame[column][row]
dataframe = self.ws.puzzle_df.copy()
top_right = dataframe[13][0] # -> '+'
bottom_left = dataframe[0][13] # -> '-'
# DataFrame turns counter clockwise, so 0 -> 90 -> 180 -> 270 -> 360
dataframe_0deg = self.ws.get_turned_dataframe(dataframe, times=0)
dataframe_90deg = self.ws.get_turned_dataframe(dataframe, times=1)
dataframe_180deg = self.ws.get_turned_dataframe(dataframe, times=2)
dataframe_270deg = self.ws.get_turned_dataframe(dataframe, times=3)
dataframe_360deg = self.ws.get_turned_dataframe(dataframe, times=4)
# should return a pandas.DataFrame
self.assertTrue(isinstance(dataframe_0deg, pd.DataFrame))
self.assertTrue(isinstance(dataframe_90deg, pd.DataFrame))
self.assertTrue(isinstance(dataframe_180deg, pd.DataFrame))
self.assertTrue(isinstance(dataframe_270deg, pd.DataFrame))
self.assertTrue(isinstance(dataframe_360deg, pd.DataFrame))
# test the data
self.assertIsNone(assert_frame_equal(dataframe, dataframe_0deg)) # times=0
self.assertIsNone(assert_frame_equal(dataframe, dataframe_360deg)) # times=4
self.assertEqual(top_right, dataframe_0deg[13][0]) # -> '+'
self.assertEqual(bottom_left, dataframe_0deg[0][13]) # -> '-'
self.assertEqual(top_right, dataframe_360deg[13][0]) # -> '+'
self.assertEqual(bottom_left, dataframe_360deg[0][13]) # -> '-'
self.assertEqual(top_right, dataframe_90deg[0][0]) # -> '+'
self.assertEqual(bottom_left, dataframe_90deg[13][13]) # -> '-'
self.assertEqual(top_right, dataframe_180deg[0][13]) # -> '+'
self.assertEqual(bottom_left, dataframe_180deg[13][0]) # -> '-'
self.assertEqual(top_right, dataframe_270deg[13][13]) # -> '+'
self.assertEqual(bottom_left, dataframe_270deg[0][0]) # -> '-'
def test_get_diagonal_dataframe(self):
# should get an error when an invalid type is given to DataFrame
with self.assertRaises(AssertionError):
self.ws.get_diagonal_dataframe(dataframe='dataframe')
# get data to test
# DataFrame[column][row]
dataframe = self.ws.puzzle_df.copy()
top_right = dataframe[13][0] # -> '+'
top_left = dataframe[0][0] # -> 'a'
bottom_left = dataframe[0][13] # -> '-'
bottom_right = dataframe[13][13] # -> 'z'
dataframe_0deg = self.ws.get_turned_dataframe(dataframe, times=0)
dataframe_90deg = self.ws.get_turned_dataframe(dataframe, times=1)
dataframe_180deg = self.ws.get_turned_dataframe(dataframe, times=2)
dataframe_270deg = self.ws.get_turned_dataframe(dataframe, times=3)
# dataframe_360deg = self.ws.get_turned_dataframe(dataframe, times=4)
# assuming 0/360 degrees is facing north
dataframe_315deg = self.ws.get_diagonal_dataframe(dataframe_0deg)
dataframe_45deg = self.ws.get_diagonal_dataframe(dataframe_90deg)
dataframe_135deg = self.ws.get_diagonal_dataframe(dataframe_180deg)
dataframe_225deg = self.ws.get_diagonal_dataframe(dataframe_270deg)
# should return a pandas.DataFrame
self.assertTrue(isinstance(dataframe_315deg, pd.DataFrame))
self.assertTrue(isinstance(dataframe_45deg, pd.DataFrame))
self.assertTrue(isinstance(dataframe_135deg, pd.DataFrame))
self.assertTrue(isinstance(dataframe_225deg, pd.DataFrame))
# face 0deg, slice at 315deg (bottom left to top right)
self.assertEqual(bottom_left, dataframe_315deg[0][0]) # -> '-'
self.assertEqual(top_left, dataframe_315deg[0][13]) # -> 'a'
self.assertEqual(bottom_right, dataframe_315deg[13][13]) # -> 'z'
self.assertEqual(top_right, dataframe_315deg[0][26]) # -> '+'
# face at 90deg, slice at 45deg (bottom left to top right)
self.assertEqual(top_left, dataframe_45deg[0][0]) # -> 'a'
self.assertEqual(top_right, dataframe_45deg[0][13]) # -> '+'
self.assertEqual(bottom_left, dataframe_45deg[13][13]) # -> '-'
self.assertEqual(bottom_right, dataframe_45deg[0][26]) # -> 'z'
# face at 180deg, slice at 135deg (bottom left to top right)
self.assertEqual(top_right, dataframe_135deg[0][0]) # -> '+'
self.assertEqual(bottom_right, dataframe_135deg[0][13]) # -> 'z'
self.assertEqual(top_left, dataframe_135deg[13][13]) # -> 'a'
self.assertEqual(bottom_left, dataframe_135deg[0][26]) # -> '-'
# face at 270deg, slice at 225deg (bottom left to top right)
self.assertEqual(bottom_right, dataframe_225deg[0][0]) # -> 'z'
self.assertEqual(bottom_left, dataframe_225deg[0][13]) # -> '-'
self.assertEqual(top_right, dataframe_225deg[13][13]) # -> '+'
self.assertEqual(top_left, dataframe_225deg[0][26]) # -> 'a'
    def test_get_all_possibilities(self):
# should get an error when an invalid type is given to dataframe
with self.assertRaises(AssertionError):
self.ws.get_all_possibilities(dataframe='dataframe')
dataframe = self.ws.puzzle_df.copy()
result = self.ws.get_all_possibilities(dataframe=dataframe)
# should return a pandas.DataFrame
self.assertTrue(isinstance(result, pd.DataFrame))
height, width = result.shape
# 0deg, 90deg, 180deg, 270deg -> height 14
# 45deg, 135deg, 225deg, 315deg - height 27 ( height + width - 1 )
# combined = 8 DataFrame's
# 4 * 14(height) + 4 * 27 = 164
# the total of rows of the result should be 164
# the total of columns should be 14
self.assertEqual(width, 14) # total columns
self.assertEqual(height, 164) # total rows
def test_dataframe_letter_position_pair(self):
dataframe = self.ws.puzzle_df.copy()
positionframe = self.ws.position_df.copy()
data_height, data_width = dataframe.shape
pos_height, pos_width = positionframe.shape
self.assertEqual(pos_height, data_height) # 14 == 14
self.assertEqual(pos_width, data_width) # 14 == 14
combined_data = self.ws.get_all_possibilities(dataframe)
combined_positions = self.ws.get_all_possibilities(positionframe)
data_height, data_width = combined_data.shape
pos_height, pos_width = combined_positions.shape
self.assertEqual(pos_height, data_height) # 164 == 164
self.assertEqual(pos_width, data_width) # 14 == 14
# assuming the combined_data and the combined_positions are made the same
# the position of certain letter could be traced back to the original puzzle_df
#
# example:
# combined_data[column 3][row 101] -> 's'
# L-> combined_positions[column 3][row 101] -> (4, 6)
# L-> puzzle_df[column 4][row 6] -> 's'
for row, row_values in combined_data.iterrows(): # row: int
for column, _ in enumerate(row_values): # column: int
letter = combined_data[column][row] # letter of combined
self.assertTrue(isinstance(letter, str))
if len(letter.strip()) == 0:
continue
position = combined_positions[column][row] # position of combined
self.assertTrue(isinstance(position, tuple))
pos_x, pos_y = position
puzzle_letter = self.ws.puzzle_df[pos_x][pos_y] # letter of position
self.assertTrue(isinstance(puzzle_letter, str))
self.assertEqual(letter, puzzle_letter)
# print(f'{letter} == {puzzle_letter} puzzle coordinates: {position}')
def test_find_word_with_coordinates(self):
dataframe = self.ws.puzzle_df.copy()
# should get an error when an invalid type is given to DataFrame
with self.assertRaises(AssertionError):
self.ws.find_word_with_coordinates(dataframe='dataframe', coordinates=pd.Series())
# should get an error when an invalid type is given to coordinates
with self.assertRaises(AssertionError):
self.ws.find_word_with_coordinates(dataframe=pd.DataFrame(), coordinates='coordinates')
# should get an error when an invalid series of coordinates is given to coordinates
with self.assertRaises(AssertionError):
            wrong_data = pd.Series(((0, 1), (2, 3), (4, 5, 6)))  # coordinates are not all of length 2
            self.ws.find_word_with_coordinates(dataframe=pd.DataFrame(), coordinates=wrong_data)
        # should get an error when an invalid series of coordinates is given to coordinates
        with self.assertRaises(AssertionError):
            wrong_data = pd.Series(((1, ), (2, 3), (4, 5)))  # coordinates are not all of length 2
            self.ws.find_word_with_coordinates(dataframe=pd.DataFrame(), coordinates=wrong_data)
# should return None if the given value is out of range of the puzzle DataFrame
with unittest.mock.patch('builtins.print') as mocked_print:
diagonal = pd.Series(((0, 0), (99, 99)))
result = self.ws.find_word_with_coordinates(dataframe, diagonal)
self.assertFalse(result)
self.assertIn(call('KeyError: value 99 of range'), mocked_print.mock_calls)
diagonal = pd.Series(((3, 0), (4, 1), (5, 2), (6, 3), (7, 4), (8, 5), (9, 6), (10, 7)))
result = self.ws.find_word_with_coordinates(dataframe, diagonal)
self.assertEqual(result, 'diagonal')
        wrong = pd.Series(((1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8)))
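
# --- Illustrative sketch (not the implementation under test) ---
# The assertions above pin down the rotation convention: times=1 turns the
# puzzle 90 degrees counter-clockwise, so the top-right cell ends up at the
# top-left. numpy.rot90 follows the same convention, so a helper that matches
# these tests could look like this hypothetical function.
import numpy as np
import pandas as pd

def turned_dataframe_sketch(dataframe: pd.DataFrame, times: int = 1) -> pd.DataFrame:
    # np.rot90 with k=times rotates counter-clockwise by 90*times degrees
    rotated = np.rot90(dataframe.to_numpy(), k=times)
    return pd.DataFrame(rotated)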
import pandas as pd
import requests
from bs4 import BeautifulSoup, Comment
import json
import re
from datetime import datetime
import numpy as np
comm = re.compile("<!--|-->")
class Team: #change team player object
def __init__(self, team, year, player=None):
self.year = year
self.team = team
self.team_stat = requests.get(
"https://www.basketball-reference.com/teams/{}/{}.html".format(self.team, self.year)).text
self.soup = BeautifulSoup(re.sub("<!--|-->","",self.team_stat),"html.parser")
def team_sum(self, four_factor = False):
summary_container = self.soup.find("table",id="team_misc")
summary_table = summary_container.find("tbody")
team_sum_row = summary_table.find_all("tr")
dict_league_rank = {row['data-stat']:row.get_text() for row in team_sum_row[1]}
dict_team_sum = {row['data-stat']:row.get_text() for row in team_sum_row[0]}
del dict_team_sum['player'], dict_league_rank['player']
df_team = pd.DataFrame(data = [dict_team_sum, dict_league_rank],index = ['TEAM','LEAGUE']).T
for column in df_team.columns:
try:
                df_team[column] = pd.to_numeric(df_team[column])
            except (ValueError, TypeError):
                # assumed completion: keep non-numeric columns (text fields) as-is
                pass
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#------------------------------------------------------------------------------------------------------
#Libraries
#------------------------------------------------------------------------------------------------------
import sys
import os
import pandas as pd
import numpy as np
import time
from sklearn import preprocessing
import autotuning
#------------------------------------------------------------------------------------------------------
#Main program
#------------------------------------------------------------------------------------------------------
if __name__ == '__main__':
# Create a random dataset with 4 variables and 100 observations. It is
# important that the dataframe has a label for every variable as the
# process selecets the best features for the model, an identifier is needed
# for each variable.
x_array = np.random.rand(100,4)
# We generate random coefficients that will be used to generate the output
coeffs = np.random.rand(1,4)
# Generate the output and add some noise
noise = np.random.rand(100,1) * 0.45
y_array = np.sum(x_array*coeffs, axis=1).reshape(-1,1) + noise
df_X = pd.DataFrame(x_array)
df_X.columns = ['var0', 'var1', 'var2', 'var3']
    df_y = pd.DataFrame(y_array)
import pandas as pd
import numpy as np
import itertools
def make_attendances_dataframe(num_atten,pat_per_day=10, seed=True):
"""
creates a random df with attendances as rows of size provided by user.
"""
#### make lists for input to df
atten_id = np.arange(num_atten)+1000
num_patients = int(num_atten/pat_per_day)
    if seed == True:
        np.random.seed(2)  # seed before any random draws so pat_id is reproducible
    pat_id = np.random.randint(0,num_atten*1000,num_atten)
# arrivals
num_days = int(num_atten/pat_per_day)
start_time = pd.datetime(2018,1,1) # set a date to start the attendances at
arrival_time = []
day_counter = 0
date = start_time
for atten in atten_id:
random_datetime = date + pd.Timedelta(np.random.randint(0,23),'h') + pd.Timedelta(np.random.randint(0,59),'m')
arrival_time.append(random_datetime)
day_counter += 1
        if day_counter == pat_per_day:
day_counter = 0
date = date + pd.DateOffset(1,'D')
# watiting times
time_in_department = np.random.randint(1,60*6,num_atten)
# binary ambulance arrival
ambulance = np.random.randint(0,2,num_atten)
# make df
d = {'atten_id':atten_id,
'pat_id':pat_id,
'arrival_datetime':arrival_time,
'time_in_department':time_in_department,
'ambulance_arrival':ambulance}
df = pd.DataFrame(d)#.set_index('atten_id')
# make departure times
f = lambda x, y : x + pd.Timedelta(y,'m')
df['departure_datetime'] = df.apply(lambda row: f(row['arrival_datetime'], row['time_in_department']), axis=1)
# make gender
df['gender'] = df['pat_id'].apply(lambda x : x%2)
#sort values by arrival time
df = df.sort_values('arrival_datetime')
# change att dtype
df['atten_id'] = df['atten_id'].astype('int64')
return(df)
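# Example usage (illustrative): 200 synthetic attendances, 10 arrivals per day.
# df = make_attendances_dataframe(200, pat_per_day=10, seed=True)
# resulting columns: atten_id, pat_id, arrival_datetime, time_in_department,
# ambulance_arrival, departure_datetime, gender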
def make_timeindex_dataframe(df,col_label,freq='D'):
"""calculates the first and last times contained in dataframe.
creates new df with unique hourly or daily time interval in one column."""
df = df.set_index('atten_id') # mod for script
# get_datetime cols
cols = df.select_dtypes(include='datetime').columns
    start = df[cols].min().min().replace(hour=0, minute=0, second=0)
    end = df[cols].max().max().replace(hour=0, minute=0, second=0) + pd.Timedelta(days=1)
d = pd.date_range(start,end,freq=freq)
df_new = pd.DataFrame({col_label:d})
return(df_new)
# make active_attendaces - use code from previosu script
def make_HourlyTimeAttenNum_dataframe(df,arrival_col,departure_col):
"""
inputs:
df with attendance number as index,
arrival, departure datetime col names (must be datetime format)
ouptut:
df, contains many-to-many link between the arrival_
"""
df1 = df[[arrival_col,departure_col]].copy()
df1[arrival_col] = df1[arrival_col].apply(lambda x : x.replace(second=0,minute=0)) # round arrival hour down
    df1[departure_col] = df1[departure_col].apply(lambda x : x.replace(second=0,minute=0)) +pd.Timedelta(hours=1) # round leaving time up
#### create col with number of hours active
df1['n_hours'] = ((df1[departure_col] - df1[arrival_col])/pd.Timedelta(1,'h')).astype(int)
    #### time-efficient (I hope) loop for finding all combinations of active hours for attendances - creates a (long format) list of links between attendance numbers and the hours they were active
# function for list comp which finds list of datetimes (for each hour)
date_func = lambda datetime , offset : datetime + pd.Timedelta(offset,'h')
# iterate over rows in df
df1 = df1.reset_index() # reset so have the new index to itereate over
    ids = np.empty(shape=(df1['n_hours'].sum()),dtype='int64') # preallocate array of attendance ids
timestamps = np.empty(shape=(df1['n_hours'].sum()),dtype='datetime64[s]')
row_count = 0
for row in df1.itertuples():
atten_id = [row[1]]
hour_list = [date_func(row[2],i) for i in np.arange(row[4])] # creates list of hour datetimes
# create array of list for all combinations of timestamp
for i in itertools.product(atten_id,hour_list):
            ids[row_count] = i[0] # assign attendance ids
timestamps[row_count] = i[1]
row_count += 1 # add to row count for new array
# put into df
data = {'atten_id':ids,
'hour':timestamps}
    df_new = pd.DataFrame(data=data)
    return(df_new)  # assumed return, mirroring the other helpers above
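# Illustrative pipeline (assumes the return added above): expand attendances
# into one row per (attendance, active hour) and count hourly occupancy.
# atts = make_attendances_dataframe(200)
# links = make_HourlyTimeAttenNum_dataframe(atts.set_index('atten_id'),
#                                           'arrival_datetime', 'departure_datetime')
# occupancy_per_hour = links.groupby('hour').size()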
import jieba
import pandas as pd
import wordcloud
# read the danmu (bullet-comment) txt file
with open("dan_mu.txt", encoding="utf-8") as f:
txt = f.read()
danmu_list = txt.split("\n")
# tokenize each danmu with jieba
danmu_cut = [jieba.lcut(item) for item in danmu_list]
# load the Baidu stop-word list
with open("baidu_stopwords.txt",encoding="utf-8") as f:
stop = f.read()
stop_words = stop.split()
# final words after removing stop words
s_data_cut = pd.Series(danmu_cut)
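# The snippet ends here; a possible continuation (assumption, not from the
# original file - the font path and output name are placeholders):
# all_words = s_data_cut.explode()
# final_words = all_words[~all_words.isin(stop_words)]
# wc = wordcloud.WordCloud(font_path="msyh.ttc", background_color="white")
# wc.generate(" ".join(final_words.astype(str)))
# wc.to_file("danmu_wordcloud.png")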
"""Helper classes and functions with RTOG studies.
"""
import random
import pandas as pd
import numpy as np
import pickle
from collections import Counter
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from tqdm import tqdm
import pint
# Constants defining variable and file parsing
from rtog_constants import gcp_baseline_paths, rtog_endpoints, rtog_binary_mapping, rtog_unknown_class_X
from rtog_constants import rtog_default_class_y, rtog_text_fields, rtog_field_mapping, rtog_categorical_fields
# Functions allowing RTOG data manipulation
from rtog_constants import is_categorical, merge, serum_values_to_ng_dl
def rtog_from_study_number(study_number, create_endpoints=True, standardize=False):
"""Helper function. Loads an RTOG object given the study number (str)."""
study_path = gcp_baseline_paths[study_number]
rtog = RTOG(filename=study_path, study_number=study_number, file_type='excel', create_endpoints=create_endpoints)
if standardize:
rtog.standardize_rx()
rtog.standardize_race()
rtog.standardize_gleason_scores()
rtog.standardize_tstage()
rtog.standardize_pelvic_rt()
rtog.standardize_prostate_dose()
rtog.standardize_rt_complete()
rtog.standardize_biochemical_failure()
rtog.standardize_disease_specific_survival()
rtog.cause_of_death()
# rtog_object.standardize_baseline_serum() # Note: this line takes a long time to run, due to unit conversions. Also Osama said the data is too noisy to use.
rtog.standardize_unknown_values_in_predictor_variables() # note: this must be done after standardize_rt_complete, bc that re-sets some unknown vars. This replaces the 'unknown' classes with nans, so that boosting can intelligently impute.
print("Loaded RTOG {}, Standardized={}".format(study_number, standardize))
return rtog
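# Example (illustrative): load one study and pull a modelling matrix.
# rtog_9202 = rtog_from_study_number('9202', standardize=True)
# X, y, meta = rtog_9202.get_Xy(y_var='distant_met_5year', make_binary=True)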
class RTOG(object):
def __init__(self, filename=None, study_number=None, file_type="excel", create_endpoints=True):
self.filename = filename
self.df = None
self.study_number = study_number
# Load Endpoints, Default Classes (for y), and Unknown Classes (for X).
if self.study_number in rtog_endpoints:
self.endpoints = rtog_endpoints[study_number]
if self.study_number in rtog_default_class_y:
self.default_class_y = rtog_default_class_y[study_number]
if self.study_number in rtog_unknown_class_X:
self.unknown_class_X = rtog_unknown_class_X[study_number]
# Load Data.
if self.filename is not None:
if file_type == "excel":
self.df = pd.read_excel(filename)
elif file_type == "csv":
self.df = pd.read_csv(filename, index_col=0)
self._field_fix()
self.table_sort()
# Study-specific additional derived endpoints get hardcoded here
if study_number == '9202':
# Add Radiotherapy info
gcp_path = "/export/medical_ai/ucsf/box_data/Aperio Images of NRG GU H&E Slides/NRG Statistics/RTOG 9202/All_RT_Data_9202.xlsx"
self.df_rt = pd.read_excel(gcp_path)
self.df_rt.columns = self.df_rt.columns.str.lower()
self.df_rt.rename({'pelvis_does' : 'pelvis_dose'}, axis='columns', inplace=True)
elif study_number == '9413': #note: data lacks disease specific survival
pass
elif study_number == '9408':
pass
elif study_number == '9910':
# Add Radiotherapy info
gcp_path = "/export/medical_ai/ucsf/box_data/Aperio Images of NRG GU H&E Slides/NRG Statistics/RTOG 9910/Radiation_treatment_9910.xlsx"
self.df_rt = pd.read_excel(gcp_path)
self.df_rt.columns = self.df_rt.columns.str.lower()
elif study_number == "0126":
# Add Serum info
gcp_path = "/export/medical_ai/ucsf/box_data/Aperio Images of NRG GU H&E Slides/NRG Statistics/RTOG 0126/All_serum_testosteron_0126.xlsx"
self.df_serum = pd.read_excel(gcp_path)
self.df_serum.columns = self.df_serum.columns.str.lower()
else:
pass
# Replace nans with defaults in endpoint fields
self.df = self.df.fillna(self.default_class_y)
if create_endpoints:
for timeframe in [5,10,15,25]:
self.add_distant_met_Nyr_endpoint(timeframe)
self.add_biochemical_failure_Nyr_endpoint(timeframe)
self.add_disease_specific_survival_Nyr_endpoint(timeframe)
self.add_survival_Nyr_endpoint(timeframe)
def _repr_html_(self):
return self.df._repr_html_()
def __getitem__(self, columns):
if type(columns) == str:
columns = [columns]
new_rtog = self.copy()
new_rtog.df = new_rtog.df[columns]
return new_rtog
def _field_fix(self):
"""Fixes field names for uniformity and typos. Determined in rtog_constants.py
"""
self.df = self.df.rename(columns=str.lower)
self.df = self.df.rename(rtog_field_mapping, axis='columns')
def table_sort(self):
"""Sorts rows and columns in ascending order.
"""
self.df = self.df.sort_index()
self.df = self.df.sort_index(axis=1)
def add_biochemical_failure_Nyr_endpoint(self, years):
"""Adds column 'biochemical_failure_Nyr' to self.df
        Indicates if biochemical failure occurred within N years.
Args:
years(int): the years.
Column values:
0: Censored
1: Failure within given years
2: Competing event (death without failure)
"""
field_name = 'biochemical_failure'
if self.study_number == '9202':
failure_outside_timeframe_value = 2 # Has a 'competing events' class.
new_field = field_name + "_{}year".format(years)
elif self.study_number == '9408':
failure_outside_timeframe_value = 0 # Does not have a 'competing events' class.
new_field = field_name + "_{}year".format(years)
elif self.study_number == '9413':
failure_outside_timeframe_value = 2 # Has a 'competing events' class.
new_field = field_name + "_{}year".format(years)
field_name = 'phoenix_biochemical_failure'
elif self.study_number == '9910':
failure_outside_timeframe_value = 2 # Has a 'competing events' class.
new_field = field_name + "_{}year".format(years)
field_name = 'phoenix_biochemical_failure'
elif self.study_number == "0126":
failure_outside_timeframe_value = 2 # Has a 'competing events' class.
new_field = field_name + "_{}year".format(years)
field_name = 'phoenix_biochemical_failure'
else:
raise ValueError("The failure value for biochemical_failure is not set for this study: {}".format(self.study_number))
field_name_years = field_name + "_years"
assert field_name in self.endpoint_fields(), "{} not in endpoint fields".format(field_name)
assert field_name_years in self.endpoint_fields() , "{} not in endpoint fields".format(field_name_years)
# Insert new field. If it exists already, re-compute it.
if new_field in self.df.columns:
self.df = self.df.drop(columns=[new_field])
idx = self.df.columns.get_loc(field_name) + 1
new_column_vals = []
for f, fy in zip(self.df[field_name], self.df[field_name_years]):
if f == 0: # Default class for biochemical_failure is 0. Same for biochemical_failure_5yr.
new_column_vals.append(0)
if f == 2:
new_column_vals.append(2)
if f == 1:
assert ~np.isnan(fy), "Found biochemical_failure=1, with biochemical_failure_years=nan. Impossible. See rtog {}".format(
self.study_number)
if fy <= years:
new_column_vals.append(1)
else:
new_column_vals.append(failure_outside_timeframe_value)
self.df.insert(loc=idx, column=new_field, value=list(map(int, new_column_vals)))
self.table_sort()
# Update endpoint fields
self._add_endpoint_field(new_field, 0)
def add_disease_specific_survival_Nyr_endpoint(self, years):
"""Adds column 'disease_specific_survival_Nyr' to self.df
Indicates if the patient has lived free of prostate cancer within N years.
Note: Contrast this with disease_free_survival, which means the patient has lived free of any disease.
Args:
years(int): the years.
Column values:
0: Censored
1: Failure within given years
2: Competing event (death from something other than prostate cancer.)
"""
field_name = 'disease_specific_survival'
if self.study_number == '9202':
failure_outside_timeframe_value = 2
# field_name_years = "survival_years" # Stephanie confirmed we can use this value.
elif self.study_number == '9408':
failure_outside_timeframe_value = 2
# field_name_years = "dsm_years" # Osama confirmed we can use this value.
elif self.study_number == '9413':
failure_outside_timeframe_value = 2
elif self.study_number == '9910':
failure_outside_timeframe_value = 2
elif self.study_number == '0126':
failure_outside_timeframe_value = 2
else:
raise ValueError("The failure_outside_timeframe_value for disease specific survival is not set for this study: {}".format(
self.study_number))
field_name_years = field_name + "_years"
assert field_name in self.endpoint_fields(), "{} not in endpoint fields".format(field_name)
assert field_name_years in self.endpoint_fields() , "{} not in endpoint fields".format(field_name_years)
# Insert new field. If it exists already, re-compute it.
new_field = field_name + "_{}year".format(years)
if new_field in self.df.columns:
self.df = self.df.drop(columns=[new_field])
idx = self.df.columns.get_loc(field_name) + 1
new_column_vals = []
for dss, dfsy in zip(self.df[field_name], self.df[field_name_years]):
if dss == 0: # Default class for distant_met is 0. Same for distant_met_5yr.
new_column_vals.append(0)
if dss == 2:
new_column_vals.append(2)
if dss == 1:
if dfsy <= years:
new_column_vals.append(1)
else:
new_column_vals.append(failure_outside_timeframe_value)
        self.df.insert(loc=idx, column=new_field, value=list(map(int, new_column_vals)))
self.table_sort()
# Update endpoint fields
self._add_endpoint_field(new_field, 0)
def add_survival_Nyr_endpoint(self, years):
"""Adds column 'survival_Nyr' to self.df. Refers to overall survival.
Args:
years(int): the years.
Column values:
0: Alive, within given years.
1: Death, within given years.
"""
field_name = 'survival'
field_name_years = "survival_years" # Note, that for disease_specific_survival=1, we can take the time period from disease_free_surival_years.
assert field_name in self.endpoint_fields(), "{} not in endpoint fields".format(field_name)
assert field_name_years in self.endpoint_fields() , "{} not in endpoint fields".format(field_name_years)
# Insert new field. If it exists already, re-compute it.
new_field = field_name + "_{}year".format(years)
if new_field in self.df.columns:
self.df = self.df.drop(columns=[new_field])
idx = self.df.columns.get_loc(field_name) + 1
new_column_vals = []
for fn, fny in zip(self.df[field_name], self.df[field_name_years]):
if fn == 0: # Default class for distant_met is 0. Same for distant_met_5yr.
new_column_vals.append(0)
if fn == 1:
if fny <= years:
new_column_vals.append(1)
else:
new_column_vals.append(0)
self.df.insert(loc=idx, column=new_field, value=list(map(int, new_column_vals)))
self.table_sort()
# Update endpoint fields
self._add_endpoint_field(new_field, 0)
def add_distant_met_Nyr_endpoint(self, years):
"""Adds column 'distant_met_Nyr' to self.df
Indicates if the cancer metastasized within N years.
Args:
years(int): the years.
Column values:
0: Censored
1: Failure within given years (metastatic prostate cancer)
2: Competing event (death from something other than prostate cancer.)
"""
field_name = 'distant_met'
field_name_years = field_name + "_years"
if self.study_number == '9202':
failure_outside_timeframe_value = 2 # Has a 'competing events' class.
elif self.study_number == '9408':
failure_outside_timeframe_value = 0 # Has a 'competing events' class
elif self.study_number == '9413':
failure_outside_timeframe_value = 2 # Has a 'competing events' class.
elif self.study_number == '9910':
failure_outside_timeframe_value = 2 # Has a 'competing events' class.
elif self.study_number == '0126':
failure_outside_timeframe_value = 2 # Has a 'competing events' class.
else:
raise ValueError("The failure_outside_timeframe_value for disease specific survival is not set for this study: {}".format(self.study_number))
assert field_name in self.endpoint_fields(), "{} not in endpoint fields".format(field_name)
assert field_name_years in self.endpoint_fields() , "{} not in endpoint fields".format(field_name_years)
# Insert new field. If it exists already, re-compute it.
new_field = field_name + "_{}year".format(years)
if new_field in self.df.columns:
self.df = self.df.drop(columns=[new_field])
idx = self.df.columns.get_loc(field_name) + 1
new_column_vals = []
for dm, dmy in zip(self.df[field_name], self.df[field_name_years]):
if dm == 0: # Default class for distant_met is 0. Same for distant_met_5yr.
new_column_vals.append(0)
if dm == 2:
new_column_vals.append(2)
if dm == 1:
assert ~np.isnan(dmy), "Found distant_met=1, with distant_met_years=nan. Impossible. See rtog {}".format(self.study_number)
if dmy <= years:
new_column_vals.append(1)
else:
new_column_vals.append(failure_outside_timeframe_value)
self.df.insert(loc=idx, column=new_field, value=list(map(int, new_column_vals)))
self.table_sort()
# Update endpoint fields
self._add_endpoint_field(new_field, 0)
def _add_endpoint_field(self, endpoint_field, default_class_y):
if endpoint_field in self.endpoints:
if self.default_class_y[endpoint_field] != default_class_y:
raise ValueError("Endpoint already listed, with different default class: {}. New attempt: {}".format(
self.default_class_y[endpoint_field], default_class_y
))
return
self.endpoints.append(endpoint_field)
self.default_class_y[endpoint_field] = default_class_y
def printc(self):
prev = pd.options.display.max_columns
prev_r = pd.options.display.max_rows
pd.options.display.max_columns = None
pd.options.display.max_rows = 90
display(self.df)
pd.options.display.max_columns = prev
pd.options.display.max_rows = prev_r
def get_fields(self):
return self.df.columns
def set_study_number(self, number):
if number not in rtog_endpoints:
            raise ValueError('Study number not available: {}. Options: {}'.format(number, rtog_endpoints.keys()))
self.study_number = number
self.endpoints = rtog_endpoints[number]
self.default_class_y = rtog_default_class_y[number]
def copy(self):
new_rtog = RTOG()
new_rtog.df = self.df.copy(deep=True)
new_rtog.filename = self.filename
new_rtog.study_number = self.study_number
new_rtog.endpoints = self.endpoints
new_rtog.default_class_y = self.default_class_y
new_rtog.unknown_class_X = self.unknown_class_X
return new_rtog
def drop(self, columns=''):
new_rtog = self.copy()
new_rtog.df = self.df.drop(columns=columns)
return new_rtog
def clear_columns(self, columns=[""]):
"""Sets the specified column values to empty.
Args:
columns(list): the names of the columns to replace.
"""
N = len(self.df)
new_rtog = self.copy()
null_columns = {c : [''] * N for c in columns}
for c, l in null_columns.items():
new_rtog.df[c] = l
return new_rtog
def endpoint_fields(self):
if not self.study_number:
raise ValueError("Study number not set. Cannot select endpoint fields")
return self.endpoints
def text_fields(self):
if not self.study_number:
raise ValueError("Study number not set. Cannot select text fields")
return rtog_text_fields[self.study_number]
def get_Xy(self, y_var=None, make_binary=False):
"""Returns training/testing data, properly formatted.
For each study, see the RTOG XXXX Variable Listings documents for reference.
Args:
y_var(str): the column of self.df to use as the prediction variable. E.g. y_var='cod'
Any rows with nans are removed.
make_binary(bool): if True, it returns a binary vector (0,1), using the class mapping
defined above, rtog_binary_mapping.
"""
# Set X. Don't impute. Boosting methods do this better than you can.
rtog_X = self.drop(columns=self.endpoint_fields() + self.text_fields())
rtog_X = rtog_X.copy()
rtog_meta = self.copy()
rtog_meta.df = rtog_meta.df[self.endpoint_fields()]
# Set y. Impute to default class.
rtog_y = self.copy()
rtog_y = rtog_y[rtog_y.endpoint_fields()]
if y_var:
default_class_y = self.default_class_y[y_var]
rtog_y = rtog_y[y_var]
rtog_y.df = rtog_y.df.fillna(default_class_y)
if make_binary: # Forces y to be binary, using a pre-specified mapping in the parent class.
for c in rtog_y.df.columns:
mapping = rtog_binary_mapping[self.study_number][c]
rtog_y.df[c] = rtog_y.df[c].replace(mapping)
return rtog_X, rtog_y, rtog_meta
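    # Example (illustrative):
    #   X, y, meta = rtog.get_Xy(y_var='distant_met_5year', make_binary=True)
    # X drops endpoint and text fields, y holds the (optionally binarized)
    # label, and meta keeps every endpoint column for survival analysis.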
def generate_test_set(self, size=100, seed=None, field_to_balance=""):
"""Samples a test set, printing the class statistics of each.
Args:
size(int): the number of entries to sample
seed(int): Optional. Random seed for reproducibility.
field_to_balance(str): Optional. If set, function tries to return an equal class
balance in this field. E.g. disease_free_survival
Returns:
RTOG object - the sampled test set.
"""
if seed is not None:
random.seed(seed)
df = self.df.copy(deep=True)
if field_to_balance:
classes = df[field_to_balance].unique()
indices = {}
for c in classes:
sub_df = df[df[field_to_balance] == c]
indices[c] = list(sub_df.index)
m = min([len(v) for _, v in indices.items()])
for c, l in indices.items():
if len(l) > m:
random.shuffle(l)
indices[c] = l[:m]
idx = [elem for _, l in indices.items() for elem in l]
else:
idx = list(range(len(df)))
random.shuffle(idx)
idx = idx[:size]
new_rtog = self.copy()
new_rtog.df = df
new_rtog.df = new_rtog.df.loc[idx]
return new_rtog
def to_csv(self, filename):
self.df.to_csv(filename)
def standardize_disease_specific_survival(self, drop_prior_columns=True):
self.standardize_disease_specific_survival_events(drop_prior_columns=drop_prior_columns)
self.standardize_disease_specific_survival_years(drop_prior_columns=drop_prior_columns)
# If DSS-years unspecified but DSS censored, set DSS-years to 25 (assume long time).
isnan = self.df['disease_specific_survival_years'].isnull().values
iszero = (self.df['disease_specific_survival'] == 0).values
self.df.loc[np.logical_and(isnan, iszero), 'disease_specific_survival_years'] = 25
def standardize_disease_specific_survival_events(self, drop_prior_columns=True):
"""Merges variants of DSS, prioritizing phoenix, and naming everything disease_specific_survival
Args:
drop_prior_columns(bool): If True, drops the original columns.
"""
bcr_fields = [f for f in self.df.columns if 'disease_specific_survival' in f]
e_bcr_fields = np.array([f for f in bcr_fields if 'year' not in f])
idx_sort = []
idx_sort.append(np.where(['phoenix' in e for e in e_bcr_fields])[0])
idx_sort.append(np.where(['disease_specific_survival' == e for e in e_bcr_fields])[0])
idx_sort = np.array([i[0] for i in idx_sort if len(i) > 0])
e_bcr = self.df[e_bcr_fields[idx_sort]]
new_values = e_bcr[e_bcr.columns[0]]
for i in range(1,len(e_bcr.columns)):
next_best = e_bcr[e_bcr.columns[i]][new_values.isnull()].values.copy()
new_values = new_values.fillna(pd.Series(next_best))
self.df = self.df.assign(disease_specific_survival=new_values)
def standardize_disease_specific_survival_years(self, drop_prior_columns=True):
"""Merges variants of BCR, prioritizing phoenix, and naming everything disease_specific_survival
Args:
drop_prior_columns(bool): If True, drops the original columns.
"""
bcr_fields = [f for f in self.df.columns if 'disease_specific_survival' in f]
e_bcr_fields = np.array([f for f in bcr_fields if 'years' in f])
idx_sort = []
idx_sort.append(np.where(['phoenix' in e for e in e_bcr_fields])[0])
idx_sort.append(np.where(['disease_specific_survival_years' == e for e in e_bcr_fields])[0])
idx_sort = np.array([i[0] for i in idx_sort if len(i) > 0])
e_bcr = self.df[e_bcr_fields[idx_sort]]
new_values = e_bcr[e_bcr.columns[0]]
for i in range(1,len(e_bcr.columns)):
next_best = e_bcr[e_bcr.columns[i]][new_values.isnull()].values.copy()
            new_values = new_values.fillna(pd.Series(next_best))
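        # The excerpt is truncated here; by analogy with
        # standardize_disease_specific_survival_events above, the method
        # presumably finishes with something like (assumption):
        # self.df = self.df.assign(disease_specific_survival_years=new_values)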
#-*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import datetime as dt
import os
import io
import requests
from dateutil.relativedelta import relativedelta
from Modules import Read
from Modules import Graphs
from Modules.Utils import Listador, FindOutlier, Cycles
def SSTregions():
"""
Read SST weekly anomalies and put them in a DataFrame
    OUTPUTS:
SST : DataFrame with the anomalies of SST in El Niño regions
"""
SSTweek = 'https://www.cpc.ncep.noaa.gov/data/indices/wksst8110.for'
s = requests.get(SSTweek).content
date = []
N12 = []
N12_A = []
N3 = []
N3_A = []
N34 = []
N34_A = []
N4 = []
N4_A = []
with io.StringIO(s.decode('utf-8')) as f:
data = f.readlines()
for d in data[4:]:
d = d.strip()
d = d.split(' ')
date.append(dt.datetime.strptime(d[0], '%d%b%Y'))
N12 .append(float(d[1][:4]))
N12_A.append(float(d[1][4:]))
N3 .append(float(d[2][:4]))
N3_A .append(float(d[2][4:]))
N34 .append(float(d[3][:4]))
N34_A.append(float(d[3][4:]))
N4 .append(float(d[4][:4]))
N4_A .append(float(d[4][4:]))
SST = pd.DataFrame(np.array([N12_A,N3_A,N34_A,N4_A]).T, index=date, \
columns=[u'Niño1+2',u'Niño3',u'Niño34',u'Niño4'])
return SST
def ONIdata():
"""
Read ONI data and put them in a DataFrame
    OUTPUTS:
ONI : DataFrame with the ONI data
"""
linkONI = 'https://www.cpc.ncep.noaa.gov/data/indices/oni.ascii.txt'
s = requests.get(linkONI).content
Season = []
year = []
Total = []
Anom = []
date = []
with io.StringIO(s.decode('utf-8')) as f:
data = f.readlines()
m = 0
for d in data[1:]:
d = d.strip()
d = d.split()
Season.append(d[0])
year .append(int(d[1]))
Total .append(float(d[2]))
Anom .append(float(d[3]))
date .append(dt.datetime(1950,2,1)+relativedelta(months=m))
m+=1
ONI = pd.DataFrame(np.array([Anom, Total, Season]).T, index=date, \
columns=[u'Anomalie', u'Total',u'Season'])
return ONI
def SOIdata():
"""
    Read SOI data and put them in a DataFrame
    OUTPUTS:
    SOI : DataFrame with the SOI data
"""
# linkSOI = 'http://www.bom.gov.au/climate/enso/soi.txt'
linkSOI = 'https://www.ncdc.noaa.gov/teleconnections/enso/indicators/soi/data.csv'
s = requests.get(linkSOI).content
date = []
soi = []
with io.StringIO(s.decode('utf-8')) as f:
data = f.readlines()
m = 0
for i in range(len(data)):
if i >=2:
row = data[i].strip()
val = row.split(',')
date.append(dt.datetime.strptime(val[0], '%Y%m'))
soi.append(float(val[1]))
SOI = pd.DataFrame(np.array(soi).T, index=date, columns=[u'SOI'])
return SOI
def MEIdata():
"""
    Read MEI data and put them in a DataFrame
    OUTPUTS:
    MEI : DataFrame with the MEI data
"""
linkMEI = 'https://psl.noaa.gov/enso/mei/data/meiv2.data'
s = requests.get(linkMEI).content
date = []
mei = []
with io.StringIO(s.decode('utf-8')) as f:
data = f.readlines()
lims = np.array(data[0].strip().split(' ')).astype(int)
for i in range(len(data)):
if i >=1:
row = data[i].strip()
val = row.split(' ')
for m in range(12):
date.append(dt.datetime(int(val[0]),m+1,1))
mei.append(np.array(val[1:]).astype(float))
if int(val[0])== lims[1]-1:
break
mei = np.array(mei).reshape(len(mei)*12)
MEI = pd.DataFrame(np.array(mei).astype(float), index=date, columns=[u'MEI'])
return MEI
# Est_path = os.path.abspath(os.path.join(os.path.dirname(__file__), 'CleanData'))
# Est_path = os.path.abspath(os.path.join(os.path.dirname(__file__), 'CleanNiveles'))
# Est_path = os.path.abspath(os.path.join(os.path.dirname(__file__), 'CleanSedimentos'))
Est_path = os.path.abspath(os.path.join(os.path.dirname(__file__), 'Datos/ENSO_P'))
Path_out = os.path.abspath(os.path.join(os.path.dirname(__file__), 'ENSO'))
SOI = SOIdata()
MEI = MEIdata()
ONI = ONIdata()
ONI = ONI['Anomalie'].astype(float)
SST = SSTregions()
if Est_path.endswith('CleanSedimentos'):
Estaciones = Listador(Est_path, inicio='Trans',final='.csv')
else :
Estaciones = Listador(Est_path,final='.csv')
rezagos = 24
ONI_r = pd.DataFrame([], columns=np.arange(rezagos+1))
ONI_s = pd.DataFrame([], columns=np.arange(rezagos+1))
MEI_r = pd.DataFrame([], columns=np.arange(rezagos+1))
MEI_s = pd.DataFrame([], columns=np.arange(rezagos+1))
SOI_r = pd.DataFrame([], columns=np.arange(rezagos+1))
SOI_s = pd.DataFrame([], columns=np.arange(rezagos+1))
def Correlogram(X1,X2, lags=24,
graph=True,title='', name='Correlogram',
pdf=False, png=True, PathFigs=Path_out):
"""
Make correlogram with figure
INPUTS
X1 : array with serie to correlate
X2 : array with serie to correlate
lags : integer to lag the series
"""
pear = np.empty(lags+1, dtype=float)*np.nan
for i in range(lags+1):
if len(X2)-i > 3:
pear[i] = np.corrcoef(X1[i:],X2[:len(X2)-i])[0,1]
if graph == True:
Graphs.GraphCorrelogram(pear, title=title, name=name, pdf=pdf, png=png, PathFigs=PathFigs)
return pear
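# Example (illustrative): pear[k] pairs the first series at month t+k with the
# second series at month t, i.e. the ENSO index leading the station by k months.
# pear = Correlogram(anomaly.values, oni.values, lags=24, graph=False)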
def Anomalies(Serie):
"""
    Compute standardized anomalies with respect to the annual cycle
INPUTS
Serie : Pandas DataFrame with data
"""
Ciclo = Cycles(Serie, type='annual')
Devtd = Cycles(Serie, type='annual',percentiles='std')
Anom = Serie.copy()
for i in range(len(Serie)):
med = Ciclo[Serie.index[i].month - 1]
std = Devtd[Serie.index[i].month - 1]
Anom.iloc[i] = (Serie.iloc[i] - med)/std
return Anom
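# In effect each value is standardized against its calendar month:
# anomaly_t = (x_t - monthly_mean(month_t)) / monthly_std(month_t),
# assuming Cycles(..., percentiles='std') returns the monthly standard deviations.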
for i in range(len(Estaciones)):
if Est_path.endswith('CleanSedimentos'):
Est = Estaciones[i].split('_')[1].split('.csv')[0]+'_Sedimentos'
lab = 'Anomalia transporte'
elif Est_path.endswith('ENSO_P'):
Est = Estaciones[i].split('.csv')[0]
lab = 'Anomalia precipitación'
else:
Meta = pd.read_csv(os.path.join(Est_path, Estaciones[i].split('.')[0]+'.meta'),index_col=0)
Name = Meta.iloc[0].values[0]
Est = Name+'Caudal' if Meta.iloc[-4].values[0]=='CAUDAL' else Name+'Nivel'
lab = 'Anomalia Caudal' if Meta.iloc[-4].values[0]=='CAUDAL' else'Anomalia Nivel'
if Est_path.endswith('CleanNiveles'):
Est = Name+'NR'
lab = 'Anomalia nivel real'
if Est_path.endswith('CleanSedimentos'):
serie = pd.read_csv(os.path.join(Est_path, Estaciones[i]), index_col=0)
serie.index = pd.DatetimeIndex(serie.index)
elif Est_path.endswith('ENSO_P'):
serie = pd.read_csv(os.path.join(Est_path, Estaciones[i]), index_col=0)
serie.index = pd.DatetimeIndex(serie.index)
serie[serie==99999] = np.nan
serie = serie.dropna()
else:
serie = Read.EstacionCSV_pd(Estaciones[i], Est, path=Est_path)
if Estaciones[i].endswith('N.csv') == False:
serie.index = [dt.datetime.strptime(fecha.strftime("%Y-%m-%d") , "%Y-%d-%m") for fecha in serie.index]
try:
Anoma = Anomalies(serie)
except:
continue
monthly = Anoma.groupby(lambda y : (y.year,y.month)).mean()
monthly.index = [dt.datetime(idx[0],idx[1],1) for idx in monthly.index]
monthly = monthly.dropna()
Monthly = monthly.rolling(3).mean()
Monthly = Monthly.dropna()
DF_oni = Monthly.join(ONI, how='inner')
df_oni = monthly.join(ONI, how='inner')
DF_mei = Monthly.join(MEI, how='inner')
df_mei = monthly.join(MEI, how='inner')
DF_soi = Monthly.join(SOI, how='inner')
df_soi = monthly.join(SOI, how='inner')
Graphs.GraphSerieENSO(ONI, Anoma.sort_index(),
twin=False, labelENSO='ONI', labelSerie=lab, title=Est,
name='ONI_'+Est.replace(' ',''), pdf=False, png=True, PathFigs=Path_out)
Graphs.GraphSerieENSO(MEI, Anoma.sort_index(),
twin=False, labelENSO='MEI', labelSerie=lab, title=Est,
name='MEI_'+Est.replace(' ',''), pdf=False, png=True, PathFigs=Path_out)
Graphs.GraphSerieENSO(SOI, Anoma.sort_index(),
twin=False, labelENSO='SOI', labelSerie=lab, title=Est,
name='SOI_'+Est.replace(' ',''), pdf=False, png=True, PathFigs=Path_out)
oni_r = Correlogram(DF_oni.values[:,0],DF_oni.values[:,1], lags=rezagos,
graph=True,
title=u'Correlación ONI con '+Est,
name='Correlogram_ONI_'+Est.replace(' ',''),
pdf=False, png=True, PathFigs=Path_out)
oni_s = Correlogram(df_oni.values[:,0],df_oni.values[:,1], lags=rezagos,
graph=True,
title=u'Correlación ONI con '+Est,
name='CorrelogramSimple_ONI_'+Est.replace(' ',''),
pdf=False, png=True, PathFigs=Path_out)
mei_r = Correlogram(DF_mei.values[:,0],DF_mei.values[:,1], lags=rezagos,
graph=True,
title=u'Correlación MEI con '+Est,
name='Correlogram_MEI_'+Est.replace(' ',''),
pdf=False, png=True, PathFigs=Path_out)
mei_s = Correlogram(df_mei.values[:,0],df_mei.values[:,1], lags=rezagos,
graph=True,
title=u'Correlación MEI con '+Est,
name='CorrelogramSimple_MEI_'+Est.replace(' ',''),
pdf=False, png=True, PathFigs=Path_out)
soi_r = Correlogram(DF_soi.values[:,0],DF_soi.values[:,1], lags=rezagos,
graph=True,
title=u'Correlación SOI con '+Est,
name='Correlogram_SOI_'+Est.replace(' ',''),
pdf=False, png=True, PathFigs=Path_out)
soi_s = Correlogram(df_soi.values[:,0],df_soi.values[:,1], lags=rezagos,
graph=True,
title=u'Correlación SOI con '+Est,
name='CorrelogramSimple_SOI_'+Est.replace(' ',''),
pdf=False, png=True, PathFigs=Path_out)
    oni_r = pd.Series(data=oni_r, name=Est)
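    # The script is truncated here; presumably the remaining correlograms are
    # wrapped the same way (e.g. pd.Series(data=mei_r, name=Est)) and appended
    # to the ONI_r/ONI_s, MEI_r/MEI_s and SOI_r/SOI_s DataFrames created above.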
## Import
import tweepy
import os.path
import pandas as pd
from time import time
class StreamListener(tweepy.StreamListener):
def __init__(self, logger, language=None, api=None):
        self.logger = logger  # initialize the logger
self.total = 0
self.session_total = 0
self.language = language
# Save Control
self.processed_tweets_to_save = 3000
self.save_directory = 'data/'
self.save_file = 'raw_data'
self.save_location = self.save_directory + self.save_file + '.gzip'
# Backup Control
self.processed_tweets_to_backup = 6000
self.elapsed_time_to_save = 600
self.last_save_time = time()
self.raw_tweet = None
self.raw_tweet = self.load_raw_tweet()
# Info control
self.start_time = time()
super().__init__(api)
def load_raw_tweet(self):
self.logger.debug("Loading Tweets")
load_location = self.save_location
if not os.path.isfile(load_location):
load_location = load_location + '_bkp'
try:
with open(load_location, 'r'):
                raw_tweets = pd.read_parquet(load_location)
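            # The excerpt ends here; a plausible completion (assumption, not
            # from the source) would return the loaded frame and fall back to
            # an empty one when nothing can be read:
            #     return raw_tweets
            # except (FileNotFoundError, OSError):
            #     return pd.DataFrame()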
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import pytest
import re
from numpy import nan as NA
import numpy as np
from numpy.random import randint
from pandas.compat import range, u
import pandas.compat as compat
from pandas import Index, Series, DataFrame, isna, MultiIndex, notna
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
import pandas.core.strings as strings
class TestStringMethods(object):
def test_api(self):
# GH 6106, GH 9322
assert Series.str is strings.StringMethods
assert isinstance(Series(['']).str, strings.StringMethods)
# GH 9184
invalid = Series([1])
with tm.assert_raises_regex(AttributeError,
"only use .str accessor"):
invalid.str
assert not hasattr(invalid, 'str')
def test_iter(self):
# GH3638
strs = 'google', 'wikimedia', 'wikipedia', 'wikitravel'
ds = Series(strs)
for s in ds.str:
# iter must yield a Series
assert isinstance(s, Series)
# indices of each yielded Series should be equal to the index of
# the original Series
tm.assert_index_equal(s.index, ds.index)
for el in s:
# each element of the series is either a basestring/str or nan
assert isinstance(el, compat.string_types) or isna(el)
# desired behavior is to iterate until everything would be nan on the
# next iter so make sure the last element of the iterator was 'l' in
# this case since 'wikitravel' is the longest string
assert s.dropna().values.item() == 'l'
def test_iter_empty(self):
ds = Series([], dtype=object)
i, s = 100, 1
for i, s in enumerate(ds.str):
pass
# nothing to iterate over so nothing defined values should remain
# unchanged
assert i == 100
assert s == 1
def test_iter_single_element(self):
ds = Series(['a'])
for i, s in enumerate(ds.str):
pass
assert not i
assert_series_equal(ds, s)
def test_iter_object_try_string(self):
ds = Series([slice(None, randint(10), randint(10, 20)) for _ in range(
4)])
i, s = 100, 'h'
for i, s in enumerate(ds.str):
pass
assert i == 100
assert s == 'h'
def test_cat(self):
one = np.array(['a', 'a', 'b', 'b', 'c', NA], dtype=np.object_)
two = np.array(['a', NA, 'b', 'd', 'foo', NA], dtype=np.object_)
# single array
result = strings.str_cat(one)
exp = 'aabbc'
assert result == exp
result = strings.str_cat(one, na_rep='NA')
exp = 'aabbcNA'
assert result == exp
result = strings.str_cat(one, na_rep='-')
exp = 'aabbc-'
assert result == exp
result = strings.str_cat(one, sep='_', na_rep='NA')
exp = 'a_a_b_b_c_NA'
assert result == exp
result = strings.str_cat(two, sep='-')
exp = 'a-b-d-foo'
assert result == exp
# Multiple arrays
result = strings.str_cat(one, [two], na_rep='NA')
exp = np.array(['aa', 'aNA', 'bb', 'bd', 'cfoo', 'NANA'],
dtype=np.object_)
tm.assert_numpy_array_equal(result, exp)
result = strings.str_cat(one, two)
exp = np.array(['aa', NA, 'bb', 'bd', 'cfoo', NA], dtype=np.object_)
tm.assert_almost_equal(result, exp)
def test_count(self):
values = np.array(['foo', 'foofoo', NA, 'foooofooofommmfoo'],
dtype=np.object_)
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_count(mixed, 'a')
xp = np.array([1, NA, 0, NA, NA, 0, NA, NA, NA])
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.count('a')
xp = Series([1, NA, 0, NA, NA, 0, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = [u('foo'), u('foofoo'), NA, u('foooofooofommmfoo')]
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
def test_contains(self):
values = np.array(['foo', NA, 'fooommm__foo',
'mmm_', 'foommm[_]+bar'], dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, NA, True, True, False], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, regex=False)
expected = np.array([False, NA, False, False, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
values = ['foo', 'xyz', 'fooommm__foo', 'mmm_']
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# case insensitive using regex
values = ['Foo', 'xYz', 'fOOomMm__fOo', 'MMM_']
result = strings.str_contains(values, 'FOO|mmm', case=False)
expected = np.array([True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
# case insensitive without regex
result = strings.str_contains(values, 'foo', regex=False, case=False)
expected = np.array([True, False, True, False])
tm.assert_numpy_array_equal(result, expected)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_contains(mixed, 'o')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.contains('o')
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = np.array([u'foo', NA, u'fooommm__foo', u'mmm_'],
dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, np.nan, True, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, na=False)
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
values = np.array(['foo', 'xyz', 'fooommm__foo', 'mmm_'],
dtype=np.object_)
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# na
values = Series(['om', 'foo', np.nan])
res = values.str.contains('foo', na="foo")
assert res.loc[2] == "foo"
def test_startswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = np.array(['a', NA, 'b', True, datetime.today(),
'foo', None, 1, 2.], dtype=np.object_)
rs = strings.str_startswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.startswith('f')
assert isinstance(rs, Series)
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.startswith('foo', na=True)
tm.assert_series_equal(result, exp.fillna(True).astype(bool))
def test_endswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_endswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, False, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.endswith('f')
xp = Series([False, NA, False, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.endswith('foo', na=False)
tm.assert_series_equal(result, exp.fillna(False).astype(bool))
def test_title(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.title()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.title()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.title()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_lower_upper(self):
values = Series(['om', NA, 'nom', 'nom'])
result = values.str.upper()
exp = Series(['OM', NA, 'NOM', 'NOM'])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
mixed = mixed.str.upper()
rs = Series(mixed).str.lower()
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('nom'), u('nom')])
result = values.str.upper()
exp = Series([u('OM'), NA, u('NOM'), u('NOM')])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
def test_capitalize(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.capitalize()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.capitalize()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.capitalize()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_swapcase(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.swapcase()
exp = Series(["foo", "bar", NA, "bLAH", "BLURG"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "Blah", None,
1, 2.])
mixed = mixed.str.swapcase()
exp = Series(["foo", NA, "BAR", NA, NA, "bLAH", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.swapcase()
exp = Series([u("foo"), NA, u("BAR"), u("bLURG")])
tm.assert_series_equal(results, exp)
def test_casemethods(self):
values = ['aaa', 'bbb', 'CCC', 'Dddd', 'eEEE']
s = Series(values)
assert s.str.lower().tolist() == [v.lower() for v in values]
assert s.str.upper().tolist() == [v.upper() for v in values]
assert s.str.title().tolist() == [v.title() for v in values]
assert s.str.capitalize().tolist() == [v.capitalize() for v in values]
assert s.str.swapcase().tolist() == [v.swapcase() for v in values]
def test_replace(self):
values = Series(['fooBAD__barBAD', NA])
result = values.str.replace('BAD[_]*', '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series(['foobarBAD', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace('BAD[_]*', '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace('BAD[_]*', '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
result = values.str.replace(r"(?<=\w),(?=\w)", ", ", flags=re.UNICODE)
tm.assert_series_equal(result, exp)
# GH 13438
for klass in (Series, Index):
for repl in (None, 3, {'a': 'b'}):
for data in (['a', 'b', None], ['a', 'b', 'c', 'ad']):
values = klass(data)
pytest.raises(TypeError, values.str.replace, 'a', repl)
def test_replace_callable(self):
# GH 15055
values = Series(['fooBAD__barBAD', NA])
# test with callable
repl = lambda m: m.group(0).swapcase()
result = values.str.replace('[a-z][A-Z]{2}', repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
# test with wrong number of arguments, raising an error
if compat.PY2:
p_err = r'takes (no|(exactly|at (least|most)) ?\d+) arguments?'
else:
p_err = (r'((takes)|(missing)) (?(2)from \d+ to )?\d+ '
r'(?(3)required )positional arguments?')
repl = lambda: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x, y=None: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
# test regex named groups
values = Series(['Foo Bar Baz', NA])
pat = r"(?P<first>\w+) (?P<middle>\w+) (?P<last>\w+)"
repl = lambda m: m.group('middle').swapcase()
result = values.str.replace(pat, repl)
exp = Series(['bAR', NA])
tm.assert_series_equal(result, exp)
def test_replace_compiled_regex(self):
# GH 15446
values = Series(['fooBAD__barBAD', NA])
# test with compiled regex
pat = re.compile(r'BAD[_]*')
result = values.str.replace(pat, '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace(pat, '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace(pat, '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace(pat, '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
pat = re.compile(r"(?<=\w),(?=\w)", flags=re.UNICODE)
result = values.str.replace(pat, ", ")
tm.assert_series_equal(result, exp)
# case and flags provided to str.replace will have no effect
# and will produce warnings
values = Series(['fooBAD__barBAD__bad', NA])
pat = re.compile(r'BAD[_]*')
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', flags=re.IGNORECASE)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=False)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=True)
# test with callable
values = Series(['fooBAD__barBAD', NA])
repl = lambda m: m.group(0).swapcase()
pat = re.compile('[a-z][A-Z]{2}')
result = values.str.replace(pat, repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
def test_repeat(self):
values = Series(['a', 'b', NA, 'c', NA, 'd'])
result = values.str.repeat(3)
exp = Series(['aaa', 'bbb', NA, 'ccc', NA, 'ddd'])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series(['a', 'bb', NA, 'cccc', NA, 'dddddd'])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
rs = Series(mixed).str.repeat(3)
xp = Series(['aaa', NA, 'bbb', NA, NA, 'foofoofoo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('d')])
result = values.str.repeat(3)
exp = Series([u('aaa'), u('bbb'), NA, u('ccc'), NA, u('ddd')])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series([u('a'), u('bb'), NA, u('cccc'), NA, u('dddddd')])
tm.assert_series_equal(result, exp)
def test_match(self):
# New match behavior introduced in 0.13
values = Series(['fooBAD__barBAD', NA, 'foo'])
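# str.match anchors the pattern at the start of each string (re.match
# semantics) and returns a boolean Series; missing values stay NaN
# unless na= is supplied (see the na cases at the end of this method).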
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*BAD[_]+.*BAD')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# test passing as_indexer still works but is ignored
values = Series(['fooBAD__barBAD', NA, 'foo'])
exp = Series([True, NA, False])
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=True)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=False)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*(BAD[_]+).*(BAD)', as_indexer=True)
tm.assert_series_equal(result, exp)
pytest.raises(ValueError, values.str.match, '.*(BAD[_]+).*(BAD)',
as_indexer=False)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.match('.*(BAD[_]+).*(BAD)')
xp = Series([True, NA, True, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# na GH #6609
res = Series(['a', 0, np.nan]).str.match('a', na=False)
exp = Series([True, False, False])
assert_series_equal(exp, res)
res = Series(['a', 0, np.nan]).str.match('a')
exp = Series([True, np.nan, np.nan])
assert_series_equal(exp, res)
def test_extract_expand_None(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)', expand=None)
def test_extract_expand_unspecified(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)')
def test_extract_expand_False(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
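# With expand=False, extract returns a Series (or Index) for a single
# capture group and a DataFrame when the pattern has several groups.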
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er, er, er,
er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# GH9980
# Index only works with one regex group since
# multi-group would expand to a frame
idx = Index(['A1', 'A2', 'A3', 'A4', 'B5'])
with tm.assert_raises_regex(ValueError, "supported"):
idx.str.extract('([AB])([123])', expand=False)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=False)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=False)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=False)
assert result.name == 'uno'
exp = klass(['A', 'A'], name='uno')
if klass == Series:
tm.assert_series_equal(result, exp)
else:
tm.assert_index_equal(result, exp)
s = Series(['A1', 'B2', 'C3'])
# one group, no matches
result = s.str.extract('(_)', expand=False)
exp = Series([NA, NA, NA], dtype=object)
tm.assert_series_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=False)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=False)
exp = Series(['A', 'B', NA], name='letter')
tm.assert_series_equal(result, exp)
# two named groups
result = s.str.extract('(?P<letter>[AB])(?P<number>[123])',
expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, '3']],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], ['C', NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
s = Series(data, index=index)
result = s.str.extract(r'(\d)', expand=False)
exp = Series(['1', '2', NA], index=index)
tm.assert_series_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=False)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
# single series name is preserved.
s = Series(['a3', 'b3', 'c2'], name='bob')
r = s.str.extract(r'(?P<sue>[a-z])', expand=False)
e = Series(['a', 'b', 'c'], name='sue')
tm.assert_series_equal(r, e)
assert r.name == e.name
def test_extract_expand_True(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
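# With expand=True, extract always returns a DataFrame with one column
# per capture group, even when the pattern has only a single group.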
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er,
er, er, er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=True)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=True)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result_df = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=True)
assert isinstance(result_df, DataFrame)
result_series = result_df['uno']
assert_series_equal(result_series, Series(['A', 'A'], name='uno'))
def test_extract_series(self):
# extract should give the same result whether or not the
# series has a name.
for series_name in None, "series_name":
s = Series(['A1', 'B2', 'C3'], name=series_name)
# one group, no matches
result = s.str.extract('(_)', expand=True)
exp = DataFrame([NA, NA, NA], dtype=object)
tm.assert_frame_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=True)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=True)
exp = DataFrame({"letter": ['A', 'B', NA]})
tm.assert_frame_equal(result, exp)
# two named groups
result = s.str.extract(
'(?P<letter>[AB])(?P<number>[123])',
expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=True)
exp = DataFrame(e_list, columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
def test_extract_optional_groups(self):
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, '3']
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
result = Series(data, index=index).str.extract(
r'(\d)', expand=True)
exp = DataFrame(['1', '2', NA], index=index)
tm.assert_frame_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
def test_extract_single_group_returns_frame(self):
# GH11386 extract should always return DataFrame, even when
# there is only one group. Prior to v0.18.0, extract returned
# Series when there was only one group in the regex.
s = Series(['a3', 'b3', 'c2'], name='series_name')
r = s.str.extract(r'(?P<letter>[a-z])', expand=True)
e = DataFrame({"letter": ['a', 'b', 'c']})
tm.assert_frame_equal(r, e)
def test_extractall(self):
subject_list = [
'dave@google.com',
'tdhock5@gmail.com',
'maudelaperriere@gmail.com',
'rob@gmail.com some text steve@gmail.com',
'a@b.com some text c@d.com and e@f.com',
np.nan,
"",
]
expected_tuples = [
("dave", "google", "com"),
("tdhock5", "gmail", "com"),
("maudelaperriere", "gmail", "com"),
("rob", "gmail", "com"), ("steve", "gmail", "com"),
("a", "b", "com"), ("c", "d", "com"), ("e", "f", "com"),
]
named_pattern = r"""
(?P<user>[a-z0-9]+)
@
(?P<domain>[a-z]+)
\.
(?P<tld>[a-z]{2,4})
"""
expected_columns = ["user", "domain", "tld"]
S = Series(subject_list)
# extractall should return a DataFrame with one row for each
# match, indexed by the subject from which the match came.
expected_index = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(3, 0),
(3, 1),
(4, 0),
(4, 1),
(4, 2),
], names=(None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = S.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# The index of the input Series should be used to construct
# the index of the output DataFrame:
series_index = MultiIndex.from_tuples([
("single", "Dave"),
("single", "Toby"),
("single", "Maude"),
("multiple", "robAndSteve"),
("multiple", "abcdef"),
("none", "missing"),
("none", "empty"),
])
Si = Series(subject_list, series_index)
expected_index = MultiIndex.from_tuples([
("single", "Dave", 0),
("single", "Toby", 0),
("single", "Maude", 0),
("multiple", "robAndSteve", 0),
("multiple", "robAndSteve", 1),
("multiple", "abcdef", 0),
("multiple", "abcdef", 1),
("multiple", "abcdef", 2),
], names=(None, None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Si.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# MultiIndexed subject with names.
Sn = Series(subject_list, series_index)
Sn.index.names = ("matches", "description")
expected_index.names = ("matches", "description", "match")
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Sn.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# optional groups.
subject_list = ['', 'A1', '32']
named_pattern = '(?P<letter>[AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(named_pattern)
expected_index = MultiIndex.from_tuples([
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=['letter', 'number'])
tm.assert_frame_equal(computed_df, expected_df)
# only one of two groups has a name.
pattern = '([AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(pattern)
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=[0, 'number'])
tm.assert_frame_equal(computed_df, expected_df)
def test_extractall_single_group(self):
# extractall(one named group) returns DataFrame with one named
# column.
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
r = s.str.extractall(r'(?P<letter>[a-z])')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame({"letter": ['a', 'b', 'd', 'c']}, i)
tm.assert_frame_equal(r, e)
# extractall(one un-named group) returns DataFrame with one
# un-named column.
r = s.str.extractall(r'([a-z])')
e = DataFrame(['a', 'b', 'd', 'c'], i)
tm.assert_frame_equal(r, e)
def test_extractall_single_group_with_quantifier(self):
# extractall(one un-named group with quantifier) returns
# DataFrame with one un-named column (GH13382).
s = Series(['ab3', 'abc3', 'd4cd2'], name='series_name')
r = s.str.extractall(r'([a-z]+)')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame(['ab', 'abc', 'd', 'cd'], i)
tm.assert_frame_equal(r, e)
def test_extractall_no_matches(self):
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
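# When nothing matches, extractall returns an empty DataFrame that still
# carries one column per capture group (numbered for unnamed groups).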
# one un-named group.
r = s.str.extractall('(z)')
e = DataFrame(columns=[0])
tm.assert_frame_equal(r, e)
# two un-named groups.
r = s.str.extractall('(z)(z)')
e = DataFrame(columns=[0, 1])
tm.assert_frame_equal(r, e)
# one named group.
r = s.str.extractall('(?P<first>z)')
e = DataFrame(columns=["first"])
tm.assert_frame_equal(r, e)
# two named groups.
r = s.str.extractall('(?P<first>z)(?P<second>z)')
e = DataFrame(columns=["first", "second"])
tm.assert_frame_equal(r, e)
# one named, one un-named.
r = s.str.extractall('(z)(?P<second>z)')
e = DataFrame(columns=[0,
"second"])
tm.assert_frame_equal(r, e)
def test_extractall_stringindex(self):
s = Series(["a1a2", "b1", "c1"], name='xxx')
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([(0, 0), (0, 1), (1, 0)],
names=[None, 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
# an Index should give the same result whether or not it has a name,
# i.e. index.name does not affect the result
for idx in [Index(["a1a2", "b1", "c1"]),
Index(["a1a2", "b1", "c1"], name='xxx')]:
res = idx.str.extractall(r"[ab](?P<digit>\d)")
tm.assert_frame_equal(res, exp)
s = Series(["a1a2", "b1", "c1"], name='s_name',
index=Index(["XX", "yy", "zz"], name='idx_name'))
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([("XX", 0), ("XX", 1), ("yy", 0)],
names=["idx_name", 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
def test_extractall_errors(self):
# Does not make sense to use extractall with a regex that has
# no capture groups. (it returns DataFrame with one column for
# each capture group)
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
with tm.assert_raises_regex(ValueError, "no capture groups"):
s.str.extractall(r'[a-z]')
def test_extract_index_one_two_groups(self):
s = Series(['a3', 'b3', 'd4c2'], index=["A3", "B3", "D4"],
name='series_name')
r = s.index.str.extract(r'([A-Z])', expand=True)
e = DataFrame(['A', "B", "D"])
tm.assert_frame_equal(r, e)
# Prior to v0.18.0, index.str.extract(regex with one group)
# returned Index. With more than one group, extract raised an
# error (GH9980). Now extract always returns DataFrame.
r = s.index.str.extract(
r'(?P<letter>[A-Z])(?P<digit>[0-9])', expand=True)
e_list = [
("A", "3"),
("B", "3"),
("D", "4"),
]
e = DataFrame(e_list, columns=["letter", "digit"])
tm.assert_frame_equal(r, e)
def test_extractall_same_as_extract(self):
s = Series(['a3', 'b3', 'c2'], name='series_name')
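# Taking the cross-section at match level 0 of the extractall result
# should reproduce what extract returns for the first match per subject.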
pattern_two_noname = r'([a-z])([0-9])'
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_multi_index = s.str.extractall(pattern_two_noname)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_multi_index)
pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_multi_index = s.str.extractall(pattern_two_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_multi_index)
pattern_one_named = r'(?P<group_name>[a-z])'
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_multi_index = s.str.extractall(pattern_one_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_multi_index)
pattern_one_noname = r'([a-z])'
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_multi_index = s.str.extractall(pattern_one_noname)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_multi_index)
def test_extractall_same_as_extract_subject_index(self):
# same as above tests, but s has an MultiIndex.
i = MultiIndex.from_tuples([
("A", "first"),
("B", "second"),
("C", "third"),
], names=("capital", "ordinal"))
s = Series(['a3', 'b3', 'c2'], i, name='series_name')
pattern_two_noname = r'([a-z])([0-9])'
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_match_index = s.str.extractall(pattern_two_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_match_index)
pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_match_index = s.str.extractall(pattern_two_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_match_index)
pattern_one_named = r'(?P<group_name>[a-z])'
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_match_index = s.str.extractall(pattern_one_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_match_index)
pattern_one_noname = r'([a-z])'
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_match_index = s.str.extractall(pattern_one_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_match_index)
def test_empty_str_methods(self):
empty_str = empty = Series(dtype=object)
empty_int = Series(dtype=int)
empty_bool = Series(dtype=bool)
empty_bytes = Series(dtype=object)
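# Each accessor call on an empty Series should come back empty, with the
# dtype the assertions below pin down (object, int, bool, or a DataFrame
# for the frame-returning operations).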
# GH7241
# (extract) on empty series
tm.assert_series_equal(empty_str, empty.str.cat(empty))
assert '' == empty.str.cat()
tm.assert_series_equal(empty_str, empty.str.title())
tm.assert_series_equal(empty_int, empty.str.count('a'))
tm.assert_series_equal(empty_bool, empty.str.contains('a'))
tm.assert_series_equal(empty_bool, empty.str.startswith('a'))
tm.assert_series_equal(empty_bool, empty.str.endswith('a'))
tm.assert_series_equal(empty_str, empty.str.lower())
tm.assert_series_equal(empty_str, empty.str.upper())
tm.assert_series_equal(empty_str, empty.str.replace('a', 'b'))
tm.assert_series_equal(empty_str, empty.str.repeat(3))
tm.assert_series_equal(empty_bool, empty.str.match('^a'))
tm.assert_frame_equal(
DataFrame(columns=[0], dtype=str),
empty.str.extract('()', expand=True))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=str),
empty.str.extract('()()', expand=True))
tm.assert_series_equal(
empty_str,
empty.str.extract('()', expand=False))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=str),
empty.str.extract('()()', expand=False))
tm.assert_frame_equal(DataFrame(dtype=str), empty.str.get_dummies())
tm.assert_series_equal(empty_str, empty_str.str.join(''))
tm.assert_series_equal(empty_int, empty.str.len())
tm.assert_series_equal(empty_str, empty_str.str.findall('a'))
tm.assert_series_equal(empty_int, empty.str.find('a'))
tm.assert_series_equal(empty_int, empty.str.rfind('a'))
tm.assert_series_equal(empty_str, empty.str.pad(42))
tm.assert_series_equal(empty_str, empty.str.center(42))
tm.assert_series_equal(empty_str, empty.str.split('a'))
tm.assert_series_equal(empty_str, empty.str.rsplit('a'))
tm.assert_series_equal(empty_str,
empty.str.partition('a', expand=False))
tm.assert_series_equal(empty_str,
empty.str.rpartition('a', expand=False))
tm.assert_series_equal(empty_str, empty.str.slice(stop=1))
tm.assert_series_equal(empty_str, empty.str.slice(step=1))
tm.assert_series_equal(empty_str, empty.str.strip())
tm.assert_series_equal(empty_str, empty.str.lstrip())
tm.assert_series_equal(empty_str, empty.str.rstrip())
tm.assert_series_equal(empty_str, empty.str.wrap(42))
tm.assert_series_equal(empty_str, empty.str.get(0))
tm.assert_series_equal(empty_str, empty_bytes.str.decode('ascii'))
tm.assert_series_equal(empty_bytes, empty.str.encode('ascii'))
tm.assert_series_equal(empty_str, empty.str.isalnum())
tm.assert_series_equal(empty_str, empty.str.isalpha())
tm.assert_series_equal(empty_str, empty.str.isdigit())
tm.assert_series_equal(empty_str, empty.str.isspace())
tm.assert_series_equal(empty_str, empty.str.islower())
tm.assert_series_equal(empty_str, empty.str.isupper())
tm.assert_series_equal(empty_str, empty.str.istitle())
tm.assert_series_equal(empty_str, empty.str.isnumeric())
tm.assert_series_equal(empty_str, empty.str.isdecimal())
tm.assert_series_equal(empty_str, empty.str.capitalize())
tm.assert_series_equal(empty_str, empty.str.swapcase())
tm.assert_series_equal(empty_str, empty.str.normalize('NFC'))
if compat.PY3:
table = str.maketrans('a', 'b')
else:
import string
table = string.maketrans('a', 'b')
tm.assert_series_equal(empty_str, empty.str.translate(table))
def test_empty_str_methods_to_frame(self):
empty = Series(dtype=str)
empty_df = DataFrame([])
tm.assert_frame_equal(empty_df, empty.str.partition('a'))
tm.assert_frame_equal(empty_df, empty.str.rpartition('a'))
def test_ismethods(self):
values = ['A', 'b', 'Xy', '4', '3A', '', 'TT', '55', '-', ' ']
str_s = Series(values)
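# The expected vectors below mirror Python's built-in str.is* predicates
# element-wise; the tolist() assertions at the end check that directly.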
alnum_e = [True, True, True, True, True, False, True, True, False,
False]
alpha_e = [True, True, True, False, False, False, True, False, False,
False]
digit_e = [False, False, False, True, False, False, False, True, False,
False]
# TODO: unused
num_e = [False, False, False, True, False, False, # noqa
False, True, False, False]
space_e = [False, False, False, False, False, False, False, False,
False, True]
lower_e = [False, True, False, False, False, False, False, False,
False, False]
upper_e = [True, False, False, False, True, False, True, False, False,
False]
title_e = [True, False, True, False, True, False, False, False, False,
False]
tm.assert_series_equal(str_s.str.isalnum(), Series(alnum_e))
tm.assert_series_equal(str_s.str.isalpha(), Series(alpha_e))
tm.assert_series_equal(str_s.str.isdigit(), Series(digit_e))
tm.assert_series_equal(str_s.str.isspace(), Series(space_e))
tm.assert_series_equal(str_s.str.islower(), Series(lower_e))
tm.assert_series_equal(str_s.str.isupper(), Series(upper_e))
tm.assert_series_equal(str_s.str.istitle(), Series(title_e))
assert str_s.str.isalnum().tolist() == [v.isalnum() for v in values]
assert str_s.str.isalpha().tolist() == [v.isalpha() for v in values]
assert str_s.str.isdigit().tolist() == [v.isdigit() for v in values]
assert str_s.str.isspace().tolist() == [v.isspace() for v in values]
assert str_s.str.islower().tolist() == [v.islower() for v in values]
assert str_s.str.isupper().tolist() == [v.isupper() for v in values]
assert str_s.str.istitle().tolist() == [v.istitle() for v in values]
def test_isnumeric(self):
# 0x00bc: ¼ VULGAR FRACTION ONE QUARTER
# 0x2605: ★ not number
# 0x1378: ፸ ETHIOPIC NUMBER SEVENTY
# 0xFF13: 3 FULLWIDTH DIGIT THREE
values = ['A', '3', u'¼', u'★', u'፸', u'3', 'four']
s = Series(values)
numeric_e = [False, True, True, False, True, True, False]
decimal_e = [False, True, False, False, False, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
unicodes = [u'A', u'3', u'¼', u'★', u'፸', u'3', u'four']
assert s.str.isnumeric().tolist() == [v.isnumeric() for v in unicodes]
assert s.str.isdecimal().tolist() == [v.isdecimal() for v in unicodes]
values = ['A', np.nan, u'¼', u'★', np.nan, u'3', 'four']
s = Series(values)
numeric_e = [False, np.nan, True, False, np.nan, True, False]
decimal_e = [False, np.nan, False, False, np.nan, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
def test_get_dummies(self):
s = Series(['a|b', 'a|c', np.nan])
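# get_dummies splits each element on the separator and returns one
# indicator column per distinct token; missing values become all-zero rows.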
result = s.str.get_dummies('|')
expected = DataFrame([[1, 1, 0], [1, 0, 1], [0, 0, 0]],
columns=list('abc'))
tm.assert_frame_equal(result, expected)
s = Series(['a;b', 'a', 7])
result = s.str.get_dummies(';')
expected = DataFrame([[0, 1, 1], [0, 1, 0], [1, 0, 0]],
columns=list('7ab'))
tm.assert_frame_equal(result, expected)
# GH9980, GH8028
idx = Index(['a|b', 'a|c', 'b|c'])
result = idx.str.get_dummies('|')
expected = MultiIndex.from_tuples([(1, 1, 0), (1, 0, 1),
(0, 1, 1)], names=('a', 'b', 'c'))
tm.assert_index_equal(result, expected)
def test_get_dummies_with_name_dummy(self):
# GH 12180
# Dummies named 'name' should work as expected
s = Series(['a', 'b,name', 'b'])
result = s.str.get_dummies(',')
expected = DataFrame([[1, 0, 0], [0, 1, 1], [0, 1, 0]],
columns=['a', 'b', 'name'])
tm.assert_frame_equal(result, expected)
idx = Index(['a|b', 'name|c', 'b|name'])
result = idx.str.get_dummies('|')
expected = MultiIndex.from_tuples([(1, 1, 0, 0), (0, 0, 1, 1),
(0, 1, 0, 1)],
names=('a', 'b', 'c', 'name'))
tm.assert_index_equal(result, expected)
def test_join(self):
values = Series(['a_b_c', 'c_d_e', np.nan, 'f_g_h'])
result = values.str.split('_').str.join('_')
tm.assert_series_equal(values, result)
# mixed
mixed = Series(['a_b', NA, 'asdf_cas_asdf', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.split('_').str.join('_')
xp = Series(['a_b', NA, 'asdf_cas_asdf', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), np.nan, u('f_g_h')])
result = values.str.split('_').str.join('_')
tm.assert_series_equal(values, result)
def test_len(self):
values = Series(['foo', 'fooo', 'fooooo', np.nan, 'fooooooo'])
result = values.str.len()
exp = values.map(lambda x: len(x) if notna(x) else NA)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b', NA, 'asdf_cas_asdf', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.len()
xp = Series([3, NA, 13, NA, NA, 3, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('foo'), u('fooo'), u('fooooo'), np.nan, u(
'fooooooo')])
result = values.str.len()
exp = values.map(lambda x: len(x) if notna(x) else NA)
tm.assert_series_equal(result, exp)
def test_findall(self):
values = Series(['fooBAD__barBAD', NA, 'foo', 'BAD'])
result = values.str.findall('BAD[_]*')
exp = Series([['BAD__', 'BAD'], NA, [], ['BAD']])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['fooBAD__barBAD', NA, 'foo', True, datetime.today(),
'BAD', None, 1, 2.])
rs = Series(mixed).str.findall('BAD[_]*')
xp = Series([['BAD__', 'BAD'], NA, [], NA, NA, ['BAD'], NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo'), u('BAD')])
result = values.str.findall('BAD[_]*')
exp = Series([[u('BAD__'), u('BAD')], NA, [], [u('BAD')]])
tm.assert_almost_equal(result, exp)
def test_find(self):
values = Series(['ABCDEFG', 'BCDEFEF', 'DEFGHIJEF', 'EFGHEF', 'XXXX'])
result = values.str.find('EF')
tm.assert_series_equal(result, Series([4, 3, 1, 0, -1]))
expected = np.array([v.find('EF') for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF')
tm.assert_series_equal(result, Series([4, 5, 7, 4, -1]))
expected = np.array([v.rfind('EF') for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.find('EF', 3)
tm.assert_series_equal(result, Series([4, 3, 7, 4, -1]))
expected = np.array([v.find('EF', 3) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF', 3)
tm.assert_series_equal(result, Series([4, 5, 7, 4, -1]))
expected = np.array([v.rfind('EF', 3) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.find('EF', 3, 6)
tm.assert_series_equal(result, Series([4, 3, -1, 4, -1]))
expected = np.array([v.find('EF', 3, 6) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF', 3, 6)
tm.assert_series_equal(result, Series([4, 3, -1, 4, -1]))
expected = np.array([v.rfind('EF', 3, 6) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
with tm.assert_raises_regex(TypeError,
"expected a string object, not int"):
result = values.str.find(0)
with tm.assert_raises_regex(TypeError,
"expected a string object, not int"):
result = values.str.rfind(0)
def test_find_nan(self):
values = Series(['ABCDEFG', np.nan, 'DEFGHIJEF', np.nan, 'XXXX'])
result = values.str.find('EF')
tm.assert_series_equal(result, Series([4, np.nan, 1, np.nan, -1]))
result = values.str.rfind('EF')
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.find('EF', 3)
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.rfind('EF', 3)
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.find('EF', 3, 6)
tm.assert_series_equal(result, Series([4, np.nan, -1, np.nan, -1]))
result = values.str.rfind('EF', 3, 6)
tm.assert_series_equal(result, Series([4, np.nan, -1, np.nan, -1]))
def test_index(self):
def _check(result, expected):
if isinstance(result, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_index_equal(result, expected)
for klass in [Series, Index]:
s = klass(['ABCDEFG', 'BCDEFEF', 'DEFGHIJEF', 'EFGHEF'])
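# index/rindex mirror str.index/str.rindex: like find/rfind, but they
# raise ValueError instead of returning -1 when the substring is absent.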
result = s.str.index('EF')
_check(result, klass([4, 3, 1, 0]))
expected = np.array([v.index('EF') for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('EF')
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.rindex('EF') for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.index('EF', 3)
_check(result, klass([4, 3, 7, 4]))
expected = np.array([v.index('EF', 3) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('EF', 3)
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.rindex('EF', 3) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.index('E', 4, 8)
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.index('E', 4, 8) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('E', 0, 5)
_check(result, klass([4, 3, 1, 4]))
expected = np.array([v.rindex('E', 0, 5) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
with tm.assert_raises_regex(ValueError,
"substring not found"):
result = s.str.index('DE')
with tm.assert_raises_regex(TypeError,
"expected a string "
"object, not int"):
result = s.str.index(0)
# test with nan
s = Series(['abcb', 'ab', 'bcbe', np.nan])
result = s.str.index('b')
tm.assert_series_equal(result, Series([1, 1, 0, np.nan]))
result = s.str.rindex('b')
tm.assert_series_equal(result, Series([3, 1, 2, np.nan]))
def test_pad(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.pad(5, side='left')
exp = Series([' a', ' b', NA, ' c', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right')
exp = Series(['a ', 'b ', NA, 'c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both')
exp = Series([' a ', ' b ', NA, ' c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='left')
xp = Series([' a', NA, ' b', NA, NA, ' ee', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='right')
xp = Series(['a ', NA, 'b ', NA, NA, 'ee ', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='both')
xp = Series([' a ', NA, ' b ', NA, NA, ' ee ', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('eeeeee')])
result = values.str.pad(5, side='left')
exp = Series([u(' a'), u(' b'), NA, u(' c'), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right')
exp = Series([u('a '), u('b '), NA, u('c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both')
exp = Series([u(' a '), u(' b '), NA, u(' c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
def test_pad_fillchar(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.pad(5, side='left', fillchar='X')
exp = Series(['XXXXa', 'XXXXb', NA, 'XXXXc', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right', fillchar='X')
exp = Series(['aXXXX', 'bXXXX', NA, 'cXXXX', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both', fillchar='X')
exp = Series(['XXaXX', 'XXbXX', NA, 'XXcXX', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.pad(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.pad(5, fillchar=5)
def test_pad_width(self):
# GH 13598
s = Series(['1', '22', 'a', 'bb'])
for f in ['center', 'ljust', 'rjust', 'zfill', 'pad']:
with tm.assert_raises_regex(TypeError,
"width must be of "
"integer type, not*"):
getattr(s.str, f)('f')
def test_translate(self):
def _check(result, expected):
if isinstance(result, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_index_equal(result, expected)
for klass in [Series, Index]:
s = klass(['abcdefg', 'abcc', 'cdddfg', 'cdefggg'])
if not compat.PY3:
import string
table = string.maketrans('abc', 'cde')
else:
table = str.maketrans('abc', 'cde')
result = s.str.translate(table)
expected = klass(['cdedefg', 'cdee', 'edddfg', 'edefggg'])
_check(result, expected)
# use of deletechars is python 2 only
if not compat.PY3:
result = s.str.translate(table, deletechars='fg')
expected = klass(['cdede', 'cdee', 'eddd', 'ede'])
_check(result, expected)
result = s.str.translate(None, deletechars='fg')
expected = klass(['abcde', 'abcc', 'cddd', 'cde'])
_check(result, expected)
else:
with tm.assert_raises_regex(
ValueError, "deletechars is not a valid argument"):
result = s.str.translate(table, deletechars='fg')
# Series with non-string values
s = Series(['a', 'b', 'c', 1.2])
expected = Series(['c', 'd', 'e', np.nan])
result = s.str.translate(table)
tm.assert_series_equal(result, expected)
def test_center_ljust_rjust(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.center(5)
exp = Series([' a ', ' b ', NA, ' c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.ljust(5)
exp = Series(['a ', 'b ', NA, 'c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.rjust(5)
exp = Series([' a', ' b', NA, ' c', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'c', 'eee', None,
1, 2.])
rs = Series(mixed).str.center(5)
xp = Series([' a ', NA, ' b ', NA, NA, ' c ', ' eee ', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.ljust(5)
xp = Series(['a ', NA, 'b ', NA, NA, 'c ', 'eee ', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.rjust(5)
xp = Series([' a', NA, ' b', NA, NA, ' c', ' eee', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('eeeeee')])
result = values.str.center(5)
exp = Series([u(' a '), u(' b '), NA, u(' c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.ljust(5)
exp = Series([u('a '), u('b '), NA, u('c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.rjust(5)
exp = Series([u(' a'), u(' b'), NA, u(' c'), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
def test_center_ljust_rjust_fillchar(self):
values = Series(['a', 'bb', 'cccc', 'ddddd', 'eeeeee'])
result = values.str.center(5, fillchar='X')
expected = Series(['XXaXX', 'XXbbX', 'Xcccc', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.center(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.ljust(5, fillchar='X')
expected = Series(['aXXXX', 'bbXXX', 'ccccX', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.ljust(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rjust(5, fillchar='X')
expected = Series(['XXXXa', 'XXXbb', 'Xcccc', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.rjust(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
# If fillchar is not a character, normal str raises TypeError
# 'aaa'.ljust(5, 'XY')
# TypeError: must be char, not str
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.center(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.ljust(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.rjust(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.center(5, fillchar=1)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.ljust(5, fillchar=1)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.rjust(5, fillchar=1)
def test_zfill(self):
values = Series(['1', '22', 'aaa', '333', '45678'])
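# zfill left-pads with '0' up to the requested width; values that are
# already at least that wide are returned unchanged.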
result = values.str.zfill(5)
expected = Series(['00001', '00022', '00aaa', '00333', '45678'])
tm.assert_series_equal(result, expected)
expected = np.array([v.zfill(5) for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.zfill(3)
expected = Series(['001', '022', 'aaa', '333', '45678'])
tm.assert_series_equal(result, expected)
expected = np.array([v.zfill(3) for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
values = Series(['1', np.nan, 'aaa', np.nan, '45678'])
result = values.str.zfill(5)
expected = Series(['00001', np.nan, '00aaa', np.nan, '45678'])
tm.assert_series_equal(result, expected)
def test_split(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.split('_')
exp = Series([['a', 'b', 'c'], ['c', 'd', 'e'], NA, ['f', 'g', 'h']])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(['a__b__c', 'c__d__e', NA, 'f__g__h'])
result = values.str.split('__')
tm.assert_series_equal(result, exp)
result = values.str.split('__', expand=False)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b_c', NA, 'd_e_f', True, datetime.today(), None, 1,
2.])
result = mixed.str.split('_')
exp = Series([['a', 'b', 'c'], NA, ['d', 'e', 'f'], NA, NA, NA, NA, NA
])
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
result = mixed.str.split('_', expand=False)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), NA, u('f_g_h')])
result = values.str.split('_')
exp = Series([[u('a'), u('b'), u('c')], [u('c'), u('d'), u('e')], NA,
[u('f'), u('g'), u('h')]])
tm.assert_series_equal(result, exp)
result = values.str.split('_', expand=False)
tm.assert_series_equal(result, exp)
# regex split
values = Series([u('a,b_c'), u('c_d,e'), NA, u('f,g,h')])
result = values.str.split('[,_]')
exp = Series([[u('a'), u('b'), u('c')], [u('c'), u('d'), u('e')], NA,
[u('f'), u('g'), u('h')]])
tm.assert_series_equal(result, exp)
def test_rsplit(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
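# rsplit splits from the right-hand side and, unlike split, always treats
# the pattern literally (no regex) -- see the '[,_]' case further below.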
result = values.str.rsplit('_')
exp = Series([['a', 'b', 'c'], ['c', 'd', 'e'], NA, ['f', 'g', 'h']])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(['a__b__c', 'c__d__e', NA, 'f__g__h'])
result = values.str.rsplit('__')
tm.assert_series_equal(result, exp)
result = values.str.rsplit('__', expand=False)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b_c', NA, 'd_e_f', True, datetime.today(), None, 1,
2.])
result = mixed.str.rsplit('_')
exp = Series([['a', 'b', 'c'], NA, ['d', 'e', 'f'], NA, NA, NA, NA, NA
])
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
result = mixed.str.rsplit('_', expand=False)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), NA, u('f_g_h')])
result = values.str.rsplit('_')
exp = Series([[u('a'), u('b'), u('c')], [u('c'), u('d'), u('e')], NA,
[u('f'), u('g'), u('h')]])
tm.assert_series_equal(result, exp)
result = values.str.rsplit('_', expand=False)
tm.assert_series_equal(result, exp)
# regex split is not supported by rsplit
values = Series([u('a,b_c'), u('c_d,e'), NA, u('f,g,h')])
result = values.str.rsplit('[,_]')
exp = Series([[u('a,b_c')], [u('c_d,e')], NA, [u('f,g,h')]])
tm.assert_series_equal(result, exp)
# setting a max number of splits, make sure splitting starts from the right
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.rsplit('_', n=1)
exp = Series([['a_b', 'c'], ['c_d', 'e'], NA, ['f_g', 'h']])
tm.assert_series_equal(result, exp)
def test_split_noargs(self):
# #1859
s = Series(['Wes McKinney', 'Travis  Oliphant'])
result = s.str.split()
expected = ['Travis', 'Oliphant']
assert result[1] == expected
result = s.str.rsplit()
assert result[1] == expected
def test_split_maxsplit(self):
# n=0 and n=-1 both mean "no limit" (re.split uses maxsplit=0, str.split uses -1)
s = Series(['bd asdf jfg', 'kjasdflqw asdfnfk'])
result = s.str.split(n=-1)
xp = s.str.split()
tm.assert_series_equal(result, xp)
result = s.str.split(n=0)
tm.assert_series_equal(result, xp)
xp = s.str.split('asdf')
result = s.str.split('asdf', n=0)
tm.assert_series_equal(result, xp)
result = s.str.split('asdf', n=-1)
tm.assert_series_equal(result, xp)
def test_split_no_pat_with_nonzero_n(self):
s = Series(['split once', 'split once too!'])
result = s.str.split(n=1)
expected = Series({0: ['split', 'once'], 1: ['split', 'once too!']})
tm.assert_series_equal(expected, result, check_index_type=False)
def test_split_to_dataframe(self):
s = Series(['nosplit', 'alsonosplit'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: Series(['nosplit', 'alsonosplit'])})
tm.assert_frame_equal(result, exp)
s = Series(['some_equal_splits', 'with_no_nans'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: ['some', 'with'],
1: ['equal', 'no'],
2: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
s = Series(['some_unequal_splits', 'one_of_these_things_is_not'])
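# With expand=True, rows that produce fewer tokens are padded with NaN so
# the resulting DataFrame stays rectangular.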
result = s.str.split('_', expand=True)
exp = DataFrame({0: ['some', 'one'],
1: ['unequal', 'of'],
2: ['splits', 'these'],
3: [NA, 'things'],
4: [NA, 'is'],
5: [NA, 'not']})
tm.assert_frame_equal(result, exp)
s = Series(['some_splits', 'with_index'], index=['preserve', 'me'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: ['some', 'with'], 1: ['splits', 'index']},
index=['preserve', 'me'])
tm.assert_frame_equal(result, exp)
with tm.assert_raises_regex(ValueError, "expand must be"):
s.str.split('_', expand="not_a_boolean")
def test_split_to_multiindex_expand(self):
idx = Index(['nosplit', 'alsonosplit'])
result = idx.str.split('_', expand=True)
exp = idx
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
idx = Index(['some_equal_splits', 'with_no_nans'])
result = idx.str.split('_', expand=True)
exp = MultiIndex.from_tuples([('some', 'equal', 'splits'), (
'with', 'no', 'nans')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 3
idx = Index(['some_unequal_splits', 'one_of_these_things_is_not'])
result = idx.str.split('_', expand=True)
exp = MultiIndex.from_tuples([('some', 'unequal', 'splits', NA, NA, NA
), ('one', 'of', 'these', 'things',
'is', 'not')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 6
with tm.assert_raises_regex(ValueError, "expand must be"):
idx.str.split('_', expand="not_a_boolean")
def test_rsplit_to_dataframe_expand(self):
s = Series(['nosplit', 'alsonosplit'])
result = s.str.rsplit('_', expand=True)
exp = DataFrame({0: Series(['nosplit', 'alsonosplit'])})
tm.assert_frame_equal(result, exp)
s = Series(['some_equal_splits', 'with_no_nans'])
result = s.str.rsplit('_', expand=True)
exp = DataFrame({0: ['some', 'with'],
1: ['equal', 'no'],
2: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
result = s.str.rsplit('_', expand=True, n=2)
exp = DataFrame({0: ['some', 'with'],
1: ['equal', 'no'],
2: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
result = s.str.rsplit('_', expand=True, n=1)
exp = DataFrame({0: ['some_equal', 'with_no'], 1: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
s = Series(['some_splits', 'with_index'], index=['preserve', 'me'])
result = s.str.rsplit('_', expand=True)
exp = DataFrame({0: ['some', 'with'], 1: ['splits', 'index']},
index=['preserve', 'me'])
tm.assert_frame_equal(result, exp)
def test_rsplit_to_multiindex_expand(self):
idx = Index(['nosplit', 'alsonosplit'])
result = idx.str.rsplit('_', expand=True)
exp = idx
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
idx = Index(['some_equal_splits', 'with_no_nans'])
result = idx.str.rsplit('_', expand=True)
exp = MultiIndex.from_tuples([('some', 'equal', 'splits'), (
'with', 'no', 'nans')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 3
idx = Index(['some_equal_splits', 'with_no_nans'])
result = idx.str.rsplit('_', expand=True, n=1)
exp = MultiIndex.from_tuples([('some_equal', 'splits'),
('with_no', 'nans')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 2
def test_split_nan_expand(self):
# gh-18450
s = Series(["foo,bar,baz", NA])
result = s.str.split(",", expand=True)
exp = DataFrame([["foo", "bar", "baz"], [NA, NA, NA]])
tm.assert_frame_equal(result, exp)
# check that these are actually np.nan and not None
# TODO see GH 18463
# tm.assert_frame_equal does not differentiate np.nan from None
assert all(np.isnan(x) for x in result.iloc[1])
def test_split_with_name(self):
# GH 12617
# should preserve name
s = Series(['a,b', 'c,d'], name='xxx')
res = s.str.split(',')
exp = Series([['a', 'b'], ['c', 'd']], name='xxx')
tm.assert_series_equal(res, exp)
res = s.str.split(',', expand=True)
exp = DataFrame([['a', 'b'], ['c', 'd']])
tm.assert_frame_equal(res, exp)
idx = Index(['a,b', 'c,d'], name='xxx')
res = idx.str.split(',')
exp = Index([['a', 'b'], ['c', 'd']], name='xxx')
assert res.nlevels == 1
tm.assert_index_equal(res, exp)
res = idx.str.split(',', expand=True)
exp = MultiIndex.from_tuples([('a', 'b'), ('c', 'd')])
assert res.nlevels == 2
tm.assert_index_equal(res, exp)
def test_partition_series(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
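# partition splits on the first occurrence of the separator into a
# (before, sep, after) 3-tuple, rpartition on the last occurrence; with
# expand=False the tuples are kept inside a Series.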
result = values.str.partition('_', expand=False)
exp = Series([('a', '_', 'b_c'), ('c', '_', 'd_e'), NA,
('f', '_', 'g_h')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition('_', expand=False)
exp = Series([('a_b', '_', 'c'), ('c_d', '_', 'e'), NA,
('f_g', '_', 'h')])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(['a__b__c', 'c__d__e', NA, 'f__g__h'])
result = values.str.partition('__', expand=False)
exp = Series([('a', '__', 'b__c'), ('c', '__', 'd__e'), NA,
('f', '__', 'g__h')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition('__', expand=False)
exp = Series([('a__b', '__', 'c'), ('c__d', '__', 'e'), NA,
('f__g', '__', 'h')])
tm.assert_series_equal(result, exp)
# None
values = Series(['a b c', 'c d e', NA, 'f g h'])
result = values.str.partition(expand=False)
exp = Series([('a', ' ', 'b c'), ('c', ' ', 'd e'), NA,
('f', ' ', 'g h')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition(expand=False)
exp = Series([('a b', ' ', 'c'), ('c d', ' ', 'e'), NA,
('f g', ' ', 'h')])
tm.assert_series_equal(result, exp)
# Not split (separator absent)
values = Series(['abc', 'cde', NA, 'fgh'])
result = values.str.partition('_', expand=False)
exp = Series([('abc', '', ''), ('cde', '', ''), NA, ('fgh', '', '')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition('_', expand=False)
exp = Series([('', '', 'abc'), ('', '', 'cde'), NA, ('', '', 'fgh')])
tm.assert_series_equal(result, exp)
# unicode
values = Series([u'a_b_c', u'c_d_e', NA, u'f_g_h'])
result = values.str.partition('_', expand=False)
exp = Series([(u'a', u'_', u'b_c'), (u'c', u'_', u'd_e'),
NA, (u'f', u'_', u'g_h')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition('_', expand=False)
exp = Series([(u'a_b', u'_', u'c'), (u'c_d', u'_', u'e'),
NA, (u'f_g', u'_', u'h')])
tm.assert_series_equal(result, exp)
# compare to standard lib
values = Series(['A_B_C', 'B_C_D', 'E_F_G', 'EFGHEF'])
result = values.str.partition('_', expand=False).tolist()
assert result == [v.partition('_') for v in values]
result = values.str.rpartition('_', expand=False).tolist()
assert result == [v.rpartition('_') for v in values]
def test_partition_index(self):
values = Index(['a_b_c', 'c_d_e', 'f_g_h'])
result = values.str.partition('_', expand=False)
exp = Index(np.array([('a', '_', 'b_c'), ('c', '_', 'd_e'), ('f', '_',
'g_h')]))
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
result = values.str.rpartition('_', expand=False)
exp = Index(np.array([('a_b', '_', 'c'), ('c_d', '_', 'e'), (
'f_g', '_', 'h')]))
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
result = values.str.partition('_')
exp = Index([('a', '_', 'b_c'), ('c', '_', 'd_e'), ('f', '_', 'g_h')])
tm.assert_index_equal(result, exp)
assert isinstance(result, MultiIndex)
assert result.nlevels == 3
result = values.str.rpartition('_')
exp = Index([('a_b', '_', 'c'), ('c_d', '_', 'e'), ('f_g', '_', 'h')])
tm.assert_index_equal(result, exp)
assert isinstance(result, MultiIndex)
assert result.nlevels == 3
def test_partition_to_dataframe(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.partition('_')
exp = DataFrame({0: ['a', 'c', np.nan, 'f'],
1: ['_', '_', np.nan, '_'],
2: ['b_c', 'd_e', np.nan, 'g_h']})
tm.assert_frame_equal(result, exp)
result = values.str.rpartition('_')
exp = DataFrame({0: ['a_b', 'c_d', np.nan, 'f_g'],
1: ['_', '_', np.nan, '_'],
2: ['c', 'e', np.nan, 'h']})
tm.assert_frame_equal(result, exp)
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.partition('_', expand=True)
exp = DataFrame({0: ['a', 'c', np.nan, 'f'],
1: ['_', '_', np.nan, '_'],
2: ['b_c', 'd_e', np.nan, 'g_h']})
tm.assert_frame_equal(result, exp)
result = values.str.rpartition('_', expand=True)
exp = DataFrame({0: ['a_b', 'c_d', np.nan, 'f_g'],
1: ['_', '_', np.nan, '_'],
2: ['c', 'e', np.nan, 'h']})
tm.assert_frame_equal(result, exp)
def test_partition_with_name(self):
# GH 12617
s = Series(['a,b', 'c,d'], name='xxx')
res = s.str.partition(',')
exp = DataFrame({0: ['a', 'c'], 1: [',', ','], 2: ['b', 'd']})
tm.assert_frame_equal(res, exp)
# should preserve name
res = s.str.partition(',', expand=False)
exp = Series([('a', ',', 'b'), ('c', ',', 'd')], name='xxx')
tm.assert_series_equal(res, exp)
idx = Index(['a,b', 'c,d'], name='xxx')
res = idx.str.partition(',')
exp = MultiIndex.from_tuples([('a', ',', 'b'), ('c', ',', 'd')])
assert res.nlevels == 3
tm.assert_index_equal(res, exp)
# should preserve name
res = idx.str.partition(',', expand=False)
exp = Index(np.array([('a', ',', 'b'), ('c', ',', 'd')]), name='xxx')
assert res.nlevels == 1
tm.assert_index_equal(res, exp)
def test_pipe_failures(self):
# #2119
s = Series(['A|B|C'])
result = s.str.split('|')
exp = Series([['A', 'B', 'C']])
tm.assert_series_equal(result, exp)
result = s.str.replace('|', ' ')
exp = Series(['A B C'])
tm.assert_series_equal(result, exp)
def test_slice(self):
values = Series(['aafootwo', 'aabartwo', NA, 'aabazqux'])
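# str.slice(start, stop, step) is element-wise Python slicing,
# s[start:stop:step], with missing values passed through untouched.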
result = values.str.slice(2, 5)
exp = Series(['foo', 'bar', NA, 'baz'])
tm.assert_series_equal(result, exp)
for start, stop, step in [(0, 3, -1), (None, None, -1), (3, 10, 2),
(3, 0, -1)]:
try:
result = values.str.slice(start, stop, step)
expected = Series([s[start:stop:step] if not isna(s) else NA
for s in values])
tm.assert_series_equal(result, expected)
except Exception:
print('failed on %s:%s:%s' % (start, stop, step))
raise
# mixed
mixed = Series(['aafootwo', NA, 'aabartwo', True, datetime.today(),
None, 1, 2.])
rs = Series(mixed).str.slice(2, 5)
xp = Series(['foo', NA, 'bar', NA, NA, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.slice(2, 5, -1)
xp = Series(['oof', NA, 'rab', NA, NA, NA, NA, NA])
# unicode
values = Series([u('aafootwo'), u('aabartwo'), NA, u('aabazqux')])
result = values.str.slice(2, 5)
exp = Series([u('foo'), u('bar'), NA, u('baz')])
tm.assert_series_equal(result, exp)
result = values.str.slice(0, -1, 2)
exp = Series([u('afow'), u('abrw'), NA, u('abzu')])
tm.assert_series_equal(result, exp)
def test_slice_replace(self):
values = Series(['short', 'a bit longer', 'evenlongerthanthat', '', NA
])
exp = Series(['shrt', 'a it longer', 'evnlongerthanthat', '', NA])
result = values.str.slice_replace(2, 3)
tm.assert_series_equal(result, exp)
exp = Series(['shzrt', 'a zit longer', 'evznlongerthanthat', 'z', NA])
result = values.str.slice_replace(2, 3, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['shzort', 'a zbit longer', 'evzenlongerthanthat', 'z', NA
])
result = values.str.slice_replace(2, 2, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['shzort', 'a zbit longer', 'evzenlongerthanthat', 'z', NA
])
result = values.str.slice_replace(2, 1, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['shorz', 'a bit longez', 'evenlongerthanthaz', 'z', NA])
result = values.str.slice_replace(-1, None, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['zrt', 'zer', 'zat', 'z', NA])
result = values.str.slice_replace(None, -2, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['shortz', 'a bit znger', 'evenlozerthanthat', 'z', NA])
result = values.str.slice_replace(6, 8, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['zrt', 'a zit longer', 'evenlongzerthanthat', 'z', NA])
result = values.str.slice_replace(-10, 3, 'z')
tm.assert_series_equal(result, exp)
def test_strip_lstrip_rstrip(self):
values = Series([' aa ', ' bb \n', NA, 'cc '])
result = values.str.strip()
exp = Series(['aa', 'bb', NA, 'cc'])
tm.assert_series_equal(result, exp)
result = values.str.lstrip()
exp = Series(['aa ', 'bb \n', NA, 'cc '])
tm.assert_series_equal(result, exp)
result = values.str.rstrip()
exp = Series([' aa', ' bb', NA, 'cc'])
tm.assert_series_equal(result, exp)
def test_strip_lstrip_rstrip_mixed(self):
# mixed
mixed = Series([' aa ', NA, ' bb \t\n', True, datetime.today(), None,
1, 2.])
rs = Series(mixed).str.strip()
xp = Series(['aa', NA, 'bb', NA, NA, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.lstrip()
xp = Series(['aa ', NA, 'bb \t\n', NA, NA, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.rstrip()
xp = Series([' aa', NA, ' bb', NA, NA, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
def test_strip_lstrip_rstrip_unicode(self):
# unicode
values = Series([u(' aa '), u(' bb \n'), NA, u('cc ')])
result = values.str.strip()
exp = Series([u('aa'), u('bb'), NA, u('cc')])
tm.assert_series_equal(result, exp)
result = values.str.lstrip()
exp = Series([u('aa '), u('bb \n'), NA, u('cc ')])
tm.assert_series_equal(result, exp)
result = values.str.rstrip()
exp = Series([u(' aa'), u(' bb'), NA, u('cc')])
tm.assert_series_equal(result, exp)
def test_strip_lstrip_rstrip_args(self):
values = Series(['xxABCxx', 'xx BNSD', 'LDFJH xx'])
rs = values.str.strip('x')
xp = Series(['ABC', ' BNSD', 'LDFJH '])
assert_series_equal(rs, xp)
rs = values.str.lstrip('x')
xp = Series(['ABCxx', ' BNSD', 'LDFJH xx'])
assert_series_equal(rs, xp)
rs = values.str.rstrip('x')
xp = Series(['xxABC', 'xx BNSD', 'LDFJH '])
assert_series_equal(rs, xp)
def test_strip_lstrip_rstrip_args_unicode(self):
values = Series([u('xxABCxx'), u('xx BNSD'), u('LDFJH xx')])
rs = values.str.strip(u('x'))
xp = Series(['ABC', ' BNSD', 'LDFJH '])
assert_series_equal(rs, xp)
rs = values.str.lstrip(u('x'))
xp = Series(['ABCxx', ' BNSD', 'LDFJH xx'])
assert_series_equal(rs, xp)
rs = values.str.rstrip(u('x'))
xp = Series(['xxABC', 'xx BNSD', 'LDFJH '])
assert_series_equal(rs, xp)
def test_wrap(self):
# test values are: two words less than width, two words equal to width,
# two words greater than width, one word less than width, one word
# equal to width, one word greater than width, multiple tokens with
# trailing whitespace equal to width
values = Series([u('hello world'), u('hello world!'), u(
'hello world!!'), u('abcdefabcde'), u('abcdefabcdef'), u(
'abcdefabcdefa'), u('ab ab ab ab '), u('ab ab ab ab a'), u(
'\t')])
# expected values
xp = Series([u('hello world'), u('hello world!'), u('hello\nworld!!'),
u('abcdefabcde'), u('abcdefabcdef'), u('abcdefabcdef\na'),
u('ab ab ab ab'), u('ab ab ab ab\na'), u('')])
rs = values.str.wrap(12, break_long_words=True)
assert_series_equal(rs, xp)
# test with pre and post whitespace (non-unicode), NaN, and non-ascii
# Unicode
values = Series([' pre ', np.nan, u('\xac\u20ac\U00008000 abadcafe')
])
xp = Series([' pre', NA, u('\xac\u20ac\U00008000 ab\nadcafe')])
rs = values.str.wrap(6)
assert_series_equal(rs, xp)
def test_get(self):
values = Series(['a_b_c', 'c_d_e', np.nan, 'f_g_h'])
result = values.str.split('_').str.get(1)
expected = Series(['b', 'd', np.nan, 'g'])
tm.assert_series_equal(result, expected)
# mixed
mixed = Series(['a_b_c', NA, 'c_d_e', True, datetime.today(), None, 1,
2.])
rs = Series(mixed).str.split('_').str.get(1)
xp = Series(['b', NA, 'd', NA, NA, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), np.nan, u('f_g_h')])
result = values.str.split('_').str.get(1)
expected = Series([u('b'), u('d'), np.nan, u('g')])
import numpy as np
import pandas as pd
import optuna
import sklearn.ensemble as ensemble
import sklearn.metrics as metrics
from sklearn.model_selection import train_test_split
from itertools import chain
int_dtype_list = ['int8', 'int16', 'int32',
'int64', 'uint8', 'uint16', 'uint32', 'uint64']
float_dtype_list = ['float16', 'float32', 'float64']
def convert_multi_category(train_df, test_df, split=','):
df = pd.concat([train_df, test_df])
splited_values = df.apply(
pd.value_counts).index.str.split(split).to_series()
striped_values = pd.Series(
list(chain.from_iterable(splited_values.to_list()))).str.strip()
columns = sorted(set(striped_values))
column_names = list(map(lambda x: 'label_' + x, columns))
return_df = pd.DataFrame(columns=column_names)
for _, row in df.iterrows():
droped_values = list(chain.from_iterable(
pd.Series(row).dropna().str.split(split).to_list()))
if len(droped_values) == 0:
unique_values = []
else:
unique_values = pd.Series(droped_values).str.strip().values
row_df = pd.DataFrame()
for column in columns:
row_df['label_' +
column] = [1] if (column in unique_values) else [0]
return_df = return_df.append(row_df, ignore_index=True)
return_train_df = return_df[0:len(train_df)]
return_test_df = return_df[len(train_df):].reset_index(drop=True)
return return_train_df, return_test_df
def _target_data(train_df: pd.DataFrame, target_col: str) -> pd.Series:
"""Get target column and data from train data
Extended description of function.
Parameters
----------
train_df : pd.DataFrame
train data
target_col : str
target column name
Returns
-------
pd.Series
>>> import pandas as pd
>>> data = pd.DataFrame({"param": [1, 2, 3], "target": [1, 0, 1]})
>>> _target_data(data, "target")
y1:target
0 1
1 0
2 1
"""
target_series = train_df[target_col]
target_series.name = "y1:" + target_col
return target_series
def convert_series(train_series: pd.Series, test_series: pd.Series, threshold_one_hot=0.3, include_dummy_na=False):
series = pd.concat([train_series, test_series])
dtype = series.dtype
value_counts = series.value_counts()
value_counts_number = value_counts.shape[0]
rows_count = len(series)
return_df = pd.DataFrame()
if dtype in int_dtype_list:
if value_counts_number < (rows_count * threshold_one_hot):
if not include_dummy_na:
mode_value = value_counts.index[0]
series[np.isnan(series)] = mode_value
one_hot_df = pd.get_dummies(
series, prefix=series.name, dummy_na=include_dummy_na)
for one_hot_label, one_hot_content in one_hot_df.iteritems():
return_df[one_hot_label] = one_hot_content
elif dtype in float_dtype_list:
if value_counts_number < (rows_count * threshold_one_hot):
if not include_dummy_na:
mode_value = series.value_counts().index[0]
series[np.isnan(series)] = mode_value
one_hot_df = pd.get_dummies(
series, prefix=series.name, dummy_na=include_dummy_na)
for one_hot_label, one_hot_content in one_hot_df.iteritems():
return_df[one_hot_label] = one_hot_content
else:
mean = series.mean()
series[np.isnan(series)] = mean
return_df[series.name + "_float"] = series
elif (dtype == 'object') or (dtype == 'bool'):
if value_counts_number < (rows_count * threshold_one_hot):
if not include_dummy_na:
mode_value = series.value_counts().index[0]
series[pd.isnull(series)] = mode_value
one_hot_df = pd.get_dummies(
series, prefix=series.name, dummy_na=include_dummy_na)
for one_hot_label, one_hot_content in one_hot_df.iteritems():
return_df[one_hot_label] = one_hot_content
return return_df[0:len(train_series)], return_df[len(train_series):]
def _make_return_df(train_df, test_df, target_col, threshold_one_hot, multi_category):
if (multi_category is None):
multi_category_columns = []
else:
multi_category_columns = list(chain.from_iterable(multi_category))
return_train_df = pd.DataFrame()
return_test_df = pd.DataFrame()
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-unsafe
"""A module for meta-learner predictability.
This module contains the class :class:`MetaLearnPredictability` for meta-learner predictability. This class predicts whether a time series is predictable or not.
The predictability of a time series is determined by whether the forecasting errors of the possible best forecasting model can be less than a user-defined threshold.
"""
import ast
import logging
from typing import Dict, List, Optional, Union, Any
import joblib
import numpy as np
import pandas as pd
from kats.consts import TimeSeriesData
from kats.tsfeatures.tsfeatures import TsFeatures
from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
from sklearn.metrics import precision_recall_curve, precision_recall_fscore_support
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
class MetaLearnPredictability:
"""Meta-learner framework on predictability.
This framework uses classification algorithms to predict whether a time series is predictable or not (
we define the time series with error metrics less than a user defined threshold as predictable).
For training, it uses time series features as inputs and whether the best forecasting models' errors less than the user-defined threshold as labels.
For prediction, it takes time series or time series features as inputs to predict whether the corresponding time series is predictable or not.
This class provides preprocess, pred, pred_by_feature, save_model and load_model.
Attributes:
metadata: Optional; A list of dictionaries representing the meta-data of time series (e.g., the meta-data generated by GetMetaData object).
Each dictionary d must contain at least 3 components: 'hpt_res', 'features' and 'best_model'. d['hpt_res'] represents the best hyper-parameters for each candidate model and the corresponding errors;
d['features'] are time series features, and d['best_model'] is a string representing the best candidate model of the corresponding time series data.
metadata should not be None unless load_model is True. Default is None
threshold: Optional; A float representing the threshold for the forecasting error. A time series whose forecasting error of the best forecasting model is higher than the threshold is considered as unpredictable. Default is 0.2.
load_model: Optional; A boolean to specify whether or not to load a trained model. Default is False.
Sample Usage:
>>> mlp = MetaLearnPredictability(data)
>>> mlp.train()
>>> mlp.save_model()
>>> mlp.pred(TSdata) # Predict whether a time series is predictable.
>>> mlp2 = MetaLearnPredictability(load_model=True) # Create a new object to load the trained model
>>> mlp2.load_model()
"""
def __init__(
self,
metadata: Optional[List[Any]] = None,
threshold: float = 0.2,
load_model=False,
) -> None:
if load_model:
msg = "Initialize this class without meta data, and a pretrained model should be loaded using .load_model() method."
logging.info(msg)
else:
if metadata is None:
msg = "Please input meta data to initialize this class."
logging.error(msg)
raise ValueError(msg)
if len(metadata) <= 30:
msg = "Dataset is too small to train a meta learner!"
logging.error(msg)
raise ValueError(msg)
if "hpt_res" not in metadata[0]:
msg = "Missing best hyper-params, not able to train a meta learner!"
logging.error(msg)
raise ValueError(msg)
if "features" not in metadata[0]:
msg = "Missing time series features, not able to train a meta learner!"
logging.error(msg)
raise ValueError(msg)
if "best_model" not in metadata[0]:
msg = "Missing best models, not able to train a meta learner!"
logging.error(msg)
raise ValueError(msg)
self.metadata = metadata
self.threshold = threshold
self._reorganize_data()
self._validate_data()
self.rescale = False
self.clf = None
self._clf_threshold = None
def _reorganize_data(self) -> None:
"""Reorganize raw input data into features and labels."""
metadata = self.metadata
self.features = []
self.labels = []
for i in range(len(metadata)):
try:
if isinstance(metadata[i]["hpt_res"], str):
hpt = ast.literal_eval(metadata[i]["hpt_res"])
else:
hpt = metadata[i]["hpt_res"]
if isinstance(metadata[i]["features"], str):
feature = ast.literal_eval(metadata[i]["features"])
else:
feature = metadata[i]["features"]
self.features.append(feature)
self.labels.append(hpt[metadata[i]["best_model"]][1])
except Exception as e:
logging.exception(e)
self.labels = (np.array(self.labels) > self.threshold).astype(int)
self.features = pd.DataFrame(self.features)
import getpass
import pandas as pd
from PySide2 import QtGui
from PySide2.QtWidgets import QMainWindow, QMessageBox
from components.mensagens import Mensagens
from dao.relatorio_dao import RelatorioDao
from view.ui_tela_relatorio_chamados import Ui_RelatorioChamado
class TelaRelatorioChamado(QMainWindow, Ui_RelatorioChamado):
"""Classe da tela de relatório de chamados.
Esta classe tem por finalidade gerar vários relatórios conforme necessidade do usuário.
"""
def __init__(self):
super(TelaRelatorioChamado, self).__init__()
self.setupUi(self)
self.setWindowTitle("Relatório de Chamados")
self.setFixedSize(400, 466)
self.popula_combo_solucao()
self.mensagem = Mensagens()
self.btn_cancelar.clicked.connect(self.close)
self.btn_gerar_solucao.clicked.connect(self.gerar_relatorio_solucao)
"""Função que chamado o método de gerar relatório de soluções."""
self.btn_gerar_data.clicked.connect(self.gerar_relatorio_chamado_data)
"""Função que chamado o método de gerar relatório por Data."""
self.btn_gerar_tipo.clicked.connect(self.gerar_relatorio_tipo_chamado)
"""Função que chamado o método de gerar relatório por tipo."""
self.btn_gerar_status.clicked.connect(self.gerar_relatorio_status_chamado)
"""Função que chamado o método de gerar relatório por Status."""
self.btn_gerar_relatorio_padrao.clicked.connect(self.gerar_relatorio_padrao)
"""Função que chamado o método de gerar relatório padrão."""
def popula_combo_solucao(self):
"""Popular combo solução
Popula a combo de solução com o nome das soluções cadastradas.
:return: Lista de Soluções.
"""
relatorio_dao = RelatorioDao()
resultado = relatorio_dao.consulta_nome_solucao()
for i in resultado:
self.combo_solucao.addItem(str(i[0]))
def gerar_relatorio_chamado_data(self):
"""Gerar relatório por data.
Gera um relatório tendo como parametro a data e salva em .xlsx.
:return: Arquivo .xlsx
"""
user_windows = getpass.getuser()
if self.txt_data.text() == "":
self.mensagem.mensagem_campo_vazio('DATA')
else:
data = self.txt_data.text()
if self.radio_numero_chamado.isChecked():
try:
relatorio_dao = RelatorioDao()
resultado = relatorio_dao.relatorio_chamado_data_ordenado_por_numero(data)
if len(resultado) == 0:
msg = QMessageBox()
msg.setWindowIcon(QtGui.QIcon("_img/logo_janela.ico"))
msg.setIcon(QMessageBox.Information)
msg.setWindowTitle("Relatório de Chamados")
msg.setText('Não há dados para gerar este relatório.')
msg.exec_()
else:
dados = pd.DataFrame(resultado)
dados.columns = ['Chamado', 'Contrato', 'Cliente', 'Endereço', 'Contato', 'Telefone', 'E-mail',
'Problema', 'Observação', 'Status', 'Tipo de Chamado', 'Solução',
'Data Abertura',
'Data Fechamamento']
data_formatada = data.replace('/', '_')
dados.to_excel(f'c:\\Users\\{user_windows}\\Downloads\\'
f'Relatorio_chamados_{data_formatada}_por_numero_chamado.xlsx', index=False)
self.mensagem.mensagem_gerar_relatorio()
except ConnectionError as con_erro:
print(con_erro)
self.mensagem.mensagem_de_erro()
else:
try:
relatorio_dao = RelatorioDao()
resultado = relatorio_dao.relatorio_chamado_data_ordenado_por_contrato(data)
if len(resultado) == 0:
msg = QMessageBox()
msg.setWindowIcon(QtGui.QIcon("_img/logo_janela.ico"))
msg.setIcon(QMessageBox.Information)
msg.setWindowTitle("Relatório de Chamados")
msg.setText('Não há dados para gerar este relatório.')
msg.exec_()
else:
dados = pd.DataFrame(resultado)
dados.columns = ['Chamado', 'Contrato', 'Cliente', 'Endereço', 'Contato', 'Telefone', 'E-mail',
'Problema', 'Observação', 'Status', 'Tipo de Chamado', 'Solução',
'Data Abertura', 'Data Fechamamento']
data_formatada = data.replace('/', '_')
dados.to_excel(f'c:\\Users\\{user_windows}\\Downloads\\'
f'Relatorio_chamados_{data_formatada}_por_contrato.xlsx', index=False)
self.mensagem.mensagem_gerar_relatorio()
except ConnectionError as con_erro:
print(con_erro)
self.mensagem.mensagem_de_erro()
def gerar_relatorio_solucao(self):
"""Gerar relatório por solução.
Gera um relatório tendo como parametro a solução e salva em .xlsx.
:return: Arquivo .xlsx
"""
user_windows = getpass.getuser()
solucao = self.combo_solucao.currentText()
if self.combo_solucao.currentText() == "Selecione uma opção":
self.mensagem.mensagem_combo('SOLUÇÃO')
else:
if self.radio_numero_chamado.isChecked():
try:
relatorio_dao = RelatorioDao()
resultado = relatorio_dao.relatorio_solucao_ordenado_numero_chamado(solucao)
if len(resultado) == 0:
msg = QMessageBox()
msg.setWindowIcon(QtGui.QIcon("_img/logo_janela.ico"))
msg.setIcon(QMessageBox.Information)
msg.setWindowTitle("Relatório de Chamados")
msg.setText('Não há dados para gerar este relatório.')
msg.exec_()
else:
dados = pd.DataFrame(resultado)
#!/usr/bin/env python
from scipy import interpolate
import numpy as np
from numpy.lib.recfunctions import append_fields
import scipy.signal as sig
import scipy.stats as st
import time, os
import pandas as pd
import math
#import report_ctd
import ctdcal.report_ctd as report_ctd
import warnings
import ctdcal.fit_ctd as fit_ctd
import datetime
from decimal import Decimal
import settings
import sys
sys.path.append('ctdcal/')
import oxy_fitting
import gsw
warnings.filterwarnings("ignore", 'Mean of empty slice.')
def cast_details(stacast, log_file, p_col, time_col, b_lat_col, b_lon_col, alt_col, inMat=None):
'''
We determine the cast details using pandas magic.
First find alternating periods of pumps on and pumps off, then select the
pumps on period with the highest pressure. Get values from the row with the
highest pressure, and return all values to be sent to log.
Input:
stacast - integer, the station and cast, as SSSCC format
log_file - file handle or string, log_file
p_col - string, name of the pressure column
time_col - string, name of the time column
b_lat_col - string, name of the latitude column
b_lon_col - string, name of the longitude column
alt_col - string, name of the altimeter column
inMat - pandas dataframe, the dataframe to come in
Output:
start_cast_time - float, unix epoch seconds?, start of cast time, to be reported to log file
end_cast_time - float, unix epoch seconds?, end of cast time, to be reported to log file
bottom_cast_time - float, unix epoch seconds?, bottom of cast time, to be reported to log file
start_pressure - float, pressure at which cast started, to be reported to log file
max_pressure - float, bottom of the cast pressure, to be reported to log file
b_lat - float, latitude at bottom of cast
b_lon - float, longitude at bottom of cast
b_alti - float, altimeter reading at bottom of cast - volts only!
inMat - the dataframe that came in, with soak period trimmed off
don't need end_cast_time, max_pressure
inMat is trimmed to start and end of cast
'''
df_test = pd.DataFrame.from_records(inMat)
dfs = find_pump_on_off_dfs(df_test)
dfs_1 = find_pumps_on_dfs(dfs)
df_cast = find_max_pressure_df(dfs_1)
df_cast1 = find_last_soak_period(df_cast)
df_cast2 = trim_soak_period_from_df(df_cast1)
start_cast_time = float(df_cast2['scan_datetime'].head(1))
start_pressure = float(df_cast2['CTDPRS'].head(1))
end_cast_time = float(df_cast2['scan_datetime'].tail(1))
max_pressure = float(df_cast2['CTDPRS'].max())
bottom_cast_time = float(df_cast2.loc[df_cast2['CTDPRS'].idxmax()]['scan_datetime'])
b_lat = float(df_cast2.loc[df_cast2['CTDPRS'].idxmax()]['GPSLAT'])
b_lon = float(df_cast2.loc[df_cast2['CTDPRS'].idxmax()]['GPSLON'])
b_alti = float(df_cast2.loc[df_cast2['CTDPRS'].idxmax()]['ALT'])
#last two lines must be in to return the same as old - change to slices of df later
report_ctd.report_cast_details(stacast, log_file, start_cast_time, end_cast_time,
bottom_cast_time, start_pressure, max_pressure, b_alti,
b_lat, b_lon)
#reconvert to ndarray - might need to be altered to remove second index
# inMat = df_cast2.loc[:df_cast2['CTDPRS'].idxmax()].to_records(index=False)
inMat = df_cast2.loc[:df_cast2['CTDPRS'].idxmax()]
return start_cast_time, end_cast_time, bottom_cast_time, start_pressure, max_pressure, b_lat, b_lon, b_alti, inMat
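# --- Hedged usage sketch (not part of the original module) -------------------
# Illustrates the cast_details() call signature documented above. The
# station/cast id, log path and column labels below are assumptions for the
# example; real names come from the converted SBE time-series data.
def _example_cast_details(raw_df):
    (start_t, end_t, bottom_t, start_p, max_p,
     b_lat, b_lon, b_alt, trimmed) = cast_details(
        '00101', 'logs/cast_details.csv', 'CTDPRS', 'scan_datetime',
        'GPSLAT', 'GPSLON', 'ALT', inMat=raw_df)
    # trimmed now holds only the downcast, from the end of the soak to the
    # deepest scan
    return trimmed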
#Move next four functions to a library or class(?) Clean up module
def find_pump_on_off_dfs(df):
'''Find pump_on patterns of dataframes, and return a list(?) of dataframes to iterate over.
'''
return [g for i,g in df.groupby(df['pump_on'].ne(df['pump_on'].shift()).cumsum())]
def find_max_pressure_df(dfs):
'''Given a list of data frames, return a reference to the frame that contains the highest pressure value.
'''
max_pressure_df = dfs[0]
max_pressure = max_pressure_df['CTDPRS'].max() #TODO make into config var
for df in dfs:
if df['CTDPRS'].max() > max_pressure:
max_pressure_df = df
return max_pressure_df
def find_pumps_on_dfs(dfs):
'''Given a list of dataframes, remove all frames that contain one or more rows with a "false" pump_on flag.
'''
return list(filter(lambda df: df['pump_on'].all(), dfs))
def trim_soak_period_from_df(df):
'''Look for minimum pressure in dataframe, then return everything after minimum pressure/top of cast.
'''
test = int(df.iloc[1:int((len(df)/4))]['CTDPRS'].idxmin())
return df.loc[test:]
def find_last_soak_period(df_cast, surface_pressure=2, time_bin=8, downcast_pressure=50):
"""Find the soak period before the downcast starts.
The algorithm is tuned for repeat hydrography work, specifically US GO-SHIP
parameters. This assumes the soak depth will be somewhere between 10 and 30
meters, the package will sit at the soak depth for at least 20 to 30 seconds
before starting ascent to the surface and descent to target depth.
Parameters
----------
df_cast : DataFrame
DataFrame of the entire cast
surface_pressure : integer
Minimum surface pressure threshold required to look for soak depth.
2 dbar was chosen as an average rosette is roughly 1.5 to 2 meters tall.
time_bin : integer
Time, in whole seconds.
downcast_pressure : integer
Minimum pressure threshold required to assume downcast has started.
50 dbar has been chosen as double the deep soak depth of 20-30 dbar.
Returns
-------
df_cast_ret : DataFrame
DataFrame starting within time_bin seconds of the last soak period.
The algorithm is not guaranteed to catch the exact start of the soak period,
but it should land within time_bin seconds of the end of the soak if the
soak period assumption is valid. This window should be shorter than the total
soak period and able to catch the subsequent rise and descent of the
package that signals the start of the cast.
The algorithm has been designed to handle four general cases of casts:
* A routine cast with pumps turning on in water and normal soak
* A cast where the pumps turn on in air/on deck
* A cast where the pumps turn on and off due to rosette coming out of water
* A cast where there are multiple stops on the downcast to the target depth
"""
#Validate user input
if time_bin <= 0:
raise ValueError('Time bin value should be positive whole seconds.')
if downcast_pressure <=0:
raise ValueError('Starting downcast pressure threshold must be positive integers.')
if downcast_pressure < surface_pressure:
raise ValueError(f'Starting downcast pressure threshold must be greater \
than surface pressure threshold.')
# If pumps have not turned on until in water, return DataFrame
if df_cast.iloc[0]['CTDPRS'] > surface_pressure:
return df_cast
#Bin the data by time, and compute the average rate of descent
df_blah = df_cast.loc[:,:]
df_blah['bin'] = pd.cut(df_blah.loc[:,'index'],
range(df_blah.iloc[0]['index'],df_blah.iloc[-1]['index'],time_bin*24),
labels=False, include_lowest=True)
df_blah2 = df_blah.groupby('bin').mean()
#Compute difference of descent rates and label bins
df_blah2['prs_diff'] = df_blah2['CTDPRS'].diff().fillna(0).round(0)
df_blah2['movement'] = pd.cut(df_blah2['prs_diff'], [-1000,-0.5,0.5,1000], labels=['up','stop','down'])
#Find all periods where the rosette is not moving
df_stop = df_blah2.groupby('movement').get_group('stop')
groupby_test = df_blah2.groupby(df_blah2['movement'].ne(df_blah2['movement'].shift()).cumsum())
list_test = [g for i,g in groupby_test]
#Find a dataframe index of the last soak period before starting descent
def poop(list_obj, downcast_pressure):
""" Return dataframe index in the last soak period before starting
descent to target depth.
"""
for i, x in zip(range(len(list_test)),list_test):
if x['CTDPRS'].max() < downcast_pressure:
if x.max()['movement'] == 'stop':
index = i
if x['CTDPRS'].max() > downcast_pressure:
return index
return index
#Truncate dataframe to new starting index : end of dataframe
start_index = np.around(list_test[poop(list_test, downcast_pressure)].head(1)['index'])
df_cast = df_cast.set_index('index')
df_cast = df_cast.loc[int(start_index):,:]
df_cast_ret = df_cast.reset_index()
return df_cast_ret
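# --- Hedged sketch of how the four helpers above are chained -----------------
# Mirrors the sequence used in cast_details(); assumes a DataFrame with the
# 'index', 'CTDPRS' and 'pump_on' columns described in the docstrings.
def _example_trim_to_downcast(df_full):
    dfs = find_pump_on_off_dfs(df_full)       # alternating pump-on / pump-off chunks
    dfs_on = find_pumps_on_dfs(dfs)           # keep only pump-on chunks
    df_deep = find_max_pressure_df(dfs_on)    # chunk containing the deepest pressure
    df_soak = find_last_soak_period(df_deep)  # start near the last soak period
    return trim_soak_period_from_df(df_soak)  # drop everything before the pressure minimum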
#End move four functions
# def cast_details_old(stacast, log_file, p_col, time_col, b_lat_col, b_lon_col, alt_col, inMat=None):
# """cast_details function
#
# Function takes full NUMPY ndarray with predefined dtype array
# and adjusts ndarray to remove all extraneous surface data.
# Function returns cast start time, end time, bottom time and
# cleaned up matrix.
#
# Args:
# param1 (str): stacast, station cast input
# param2 (str): log_file, log file to write cast data.
# param3 (str): p_col, pressure data column name
# param4 (str): time_col, time data column name
# param5 (ndarray): inMat, numpy ndarray with dtype array
#
# Returns:
# Narray: The return value is ndarray with adjusted time of parameter
# specified.
#
# """
#
#
# if inMat is None:
# print("In cast_details: No data")
# return
# else:
# # Top of cast time, bottom of cast time, end of cast time,
# start_cast_time = 0.0
# bottom_cast_time = 0.0
# end_cast_time = 0.0
# # Test cycle time constant
# fl = 24
# # starting P
# start_pressure = 2.0
# # Max P
# max_pressure = 10000.0
# lm = len(inMat)-1
# rev = np.arange(int(lm/4),0,-1)
#
# # Find starting top of cast
# # Smallest P from reverse array search
# for i in rev:
# if start_pressure < inMat[p_col][i]:
# tmp = i
# elif start_pressure > inMat[p_col][i]:
# start_pressure = inMat[p_col][i]
# tmp = abs(i - 24) #patched to not break through the c(sea)-floor, can be made cleaner
# break
# start_cast_time = inMat[time_col][tmp]
#
# # Remove everything before cast start
# inMat = inMat[tmp:]
#
# # Max P and bottom time
# max_pressure = max(inMat[p_col])
# tmp = np.argmax((inMat[p_col]))
# bottom_cast_time = inMat[time_col][tmp]
# b_lat = inMat[b_lat_col][tmp]
# b_lon = inMat[b_lon_col][tmp]
# b_alti = inMat[alt_col][tmp]
#
# tmp = len(inMat)
# # Find ending top of cast time
# for i in range(int(tmp/2),tmp):
# if start_pressure > inMat[p_col][i]:
# end_cast_time = inMat[time_col][i]
# if i < tmp: tmp = i + 24
# break
#
# # Remove everything after cast end
# inMat = inMat[:tmp]
#
# report_ctd.report_cast_details(stacast, log_file, start_cast_time, end_cast_time, bottom_cast_time, start_pressure, max_pressure, b_alti, b_lat, b_lon)
#
# return start_cast_time, end_cast_time, bottom_cast_time, start_pressure, max_pressure, b_lat, b_lon, b_alti, inMat
def ctd_align(inMat=None, col=None, time=0.0):
"""ctd_align function
Function takes full NUMPY ndarray with predefined dtype array
and adjusts time of sensor responce and water flow relative to
the time frame of temperature sensor.
Args:
param1 (ndarray): inMat, numpy ndarray with dtype array
param2 (float): col, column to apply time advance to.
param3 (float): time, advance in seconds to apply to raw data.
Returns:
Narray: The return value is ndarray with adjusted time of parameter
specified.
"""
# Num of frames per second.
fl = 24
if (inMat is not None) & (col is not None) & ( time > 0.0):
# Time to advance
advnc = int(fl * time)
tmp = np.arange(advnc, dtype=np.float)
last = inMat[col][len(inMat)-1]
tmp.fill(float(last))
inMat[col] = np.concatenate((inMat[col][advnc:],tmp))
return inMat
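# --- Hedged example of ctd_align() usage --------------------------------------
# Advances an oxygen channel by 6 seconds (6 s * 24 Hz = 144 scans). The column
# name 'CTDOXYVOLTS' is an assumption; use the label carried by the converted data.
def _example_ctd_align(raw_mat):
    return ctd_align(inMat=raw_mat, col='CTDOXYVOLTS', time=6.0)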
def ctd_quality_codes(column=None, p_range=None, qual_code=None, oxy_fit=False, p_qual_col=None, qual_one=None, inMat=None):
"""ctd_quality_codes function
Function takes full NUMPY ndarray with predefined dtype array
Args:
param1 (ndarray):
param2 (float):
Returns:
Narray: The return value is ndarray with adjusted time of parameter
specified.
"""
#If p_range set apply qual codes to part of array and return
if p_range is not None:
print("Some algoirythm for formatting qual codes per pressure range")
return
else:
q_df = pd.DataFrame(index=np.arange(len(inMat)), columns=p_qual_col)
for pq in p_qual_col:
if pq in list(qual_one):
q_df[pq] = q_df[pq].fillna(1)
elif oxy_fit and pq is column:
q_df[pq] = q_df[pq].fillna(2)
else:
q_df[pq] = q_df[pq].fillna(2)
q_nd = q_df.as_matrix(columns=q_df.columns)
return q_nd
def formatTimeEpoc(time_zone='UTC', time_pattern='%Y-%m-%d %H:%M:%S', input_time = None):
"""formatTimeEpoc function
Function takes pattern of time input, relative time zone, and
date time data array and returns array of epoc time.
title and the second row are the units for each column.
Args:
param1 (str): relative time zone for data.
param2 (str): pattern of incoming data.
param3 (ndarray): input_time, numpy 1d ndarray time array
Returns:
1D ndarray: The return array of epoch time
"""
if input_time is None:
print("In formatTimeEpoc: No data entered.")
return
else:
os.environ['TZ'] = 'UTC'
epoch_time = input_time
for i in range(0,len(input_time)):
epoch_time[i] = int(time.mktime(time.strptime(str(input_time[i], "utf-8"), time_pattern)))
return epoch_time
def dataToDataFrame(inFile):
"""dataToDataFrame function
Function takes full file path to csv type data file and returns a
PANDAS dataframe for data treatment with a two row header.
Data file should have a two row header. The first row being the column
title and the second row are the units for each column.
Args:
param1 (str): Full path to data file.
Returns:
DataFrame: The return value is a full dataframe with header.
.. REF PAGE:
http://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_csv.html#pandas.read_csv
"""
#df = pd.read_csv(inFile, header=[0,2])
df = pd.read_csv(inFile)
return df
def dataToNDarray(inFile, dtype=None, names=None, separator=',', skip=None):
"""dataToNDarray function
Function takes full file path to csv type data file and returns NUMPY
ndarray type ndarray for data manipulation with a two row header.
Data file should have a two row header. The first row being the column
title and the second row are the units for each column.
Args:
param1 (str): inFile, full path to csv file
param2 (arr): dtype list
param3 (str): separator, default comma ','
Returns:
Narray: The return value is a full data ndarray with two row header.
Reference Page:
https://scipy.github.io/old-wiki/pages/Cookbook/InputOutput.html
"""
try:
return pd.read_pickle(inFile).to_records()
except:
if skip is None:
arr = np.genfromtxt(inFile, delimiter=separator, dtype=dtype, names=names)
else:
arr = np.genfromtxt(inFile, delimiter=separator, dtype=dtype, names=names, skip_header=skip)
return arr
def hysteresis_correction(H1=-0.033, H2=5000, H3=1450, inMat = None):
"""Hysteresis Correction function
Function takes data ndarray and hysteresis coefficiants
and returns hysteresis corrected oxygen data.
Args:
param1 (float): H1, hysteresis correction coefficiant 1
param2 (float): H2, hysteresis correction coefficiant 2
param3 (float): H3, hysteresis correction coefficiant 3
param5 (array): inMat, raw ctd data.
Returns:
array: Return dissolved oxygen hysteresis corrected data.
.. REF PAGE:
http://http://www.seabird.com/document/an64-3-sbe-43-dissolved-oxygen-do-sensor-hysteresis-corrections
"""
Oxnewconc = np.arange(0,len(inMat),1)
Oxnewconc[0] = inMat['o1_mll'][1]
if inMat is None:
print("Hysteresis Correction function: No data")
return
else:
for i in range(1,len(inMat)-1):
D = 1 + H1 * (math.exp(inMat['p_dbar'][i] / H2) - 1)
C = math.exp(-1 * 0.04167/ H3)
Oxnewconc[i] = ((inMat['o1_mll'][i] + (Oxnewconc[i-1] * C * D)) - (inMat['o1_mll'][i-1] * C)) / D
inMat['o1_mll'][:] = Oxnewconc[:]
return inMat
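# --- Hedged sketch of the recurrence implemented above -------------------------
# Sea-Bird application note 64-3: D = 1 + H1*(exp(P/H2) - 1), C = exp(-dt/H3)
# with dt = 1/24 s, and O2corr[i] = (O2[i] + O2corr[i-1]*C*D - O2[i-1]*C) / D.
# The call below only shows the default coefficients; the input is assumed to
# carry the 'o1_mll' and 'p_dbar' columns used by the function.
def _example_hysteresis(raw_mat):
    return hysteresis_correction(H1=-0.033, H2=5000, H3=1450, inMat=raw_mat)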
def data_interpolater(inArr):
"""data_interpolater to handle indices and logical indices of NaNs.
Input:
- inArr, 1d numpy array with return True np.isnans()
Output:
- nans, logical indices of NaNs
- index, a function, with signature indices= index(logical_indices),
to convert logical indices of NaNs to 'equivalent' indices
- interpolated array
Example:
>>> # linear interpolation of NaNs
>>> outArray = data_interpolater(inArr)
"""
nans, tmp= np.isnan(inArr), lambda z: z.nonzero()[0]
inArr[nans] = np.interp(tmp(nans), tmp(~nans), inArr[~nans])
return inArr
def o2pl2pkg(p_col, t_col, sal_col, dopl_col, dopkg_col, lat_col, lon_col, inMat):
"""o2pl2pkg convert ml/l dissolved oxygen to umol/kg
Input:
- t_col, temperature column header deg c.
- sal_col, salinity column header psu.
- dopl_col, dissolved column header ml/l.
- dopkg_col, dissolved column header umol/kg
- lat_col, latitude for entire cast deg.
- lon_col, longitude for entire cast deg.
- inMat, dtype ndarray processed ctd time data.
Output:
- Converted Oxygen column umol/kg
Example:
>>> # linear interpolation of NaNs
>>> outArray = o2pl2kg(inArr)
"""
pkg = np.ndarray(shape=len(inMat), dtype=[(dopkg_col, np.float)])
# Absolute salinity from Practical salinity.
SA = gsw.SA_from_SP(inMat[sal_col], inMat[p_col], inMat[lat_col], inMat[lon_col])
# Conservative temperature from insitu temperature.
CT = gsw.CT_from_t(SA, inMat[t_col], inMat[p_col])
s0 = gsw.sigma0(SA, CT) # Potential density from Absolute Salinity g/Kg Conservative temperature deg C.
# Convert DO ml/l to umol/kg
for i in range(0,len(inMat[dopl_col])):
pkg[i] = inMat[dopl_col][i] * 44660 / (s0[i] + 1000)
return pkg
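# --- Hedged worked example of the unit conversion above ------------------------
# O2[umol/kg] = O2[ml/l] * 44660 / (sigma0 + 1000). The numbers below are
# illustrative only, not taken from any cruise data.
def _example_o2_conversion():
    o2_mll = 6.0    # dissolved oxygen in ml/l (assumed)
    sigma0 = 27.0   # potential density anomaly in kg/m^3 (assumed)
    return o2_mll * 44660 / (sigma0 + 1000)   # ~260.9 umol/kg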
def oxy_to_umolkg(df_sal, df_pressure, df_lat, df_lon, df_temp, df_oxy):
'''Rewritten from Courtney's method to use array-likes (aka use dataframes and ndarrays).
'''
# Absolute salinity from Practical salinity.
SA = gsw.SA_from_SP(df_sal, df_pressure, df_lat, df_lon)
# Conservative temperature from insitu temperature.
CT = gsw.CT_from_t(SA, df_temp, df_pressure)
s0 = gsw.sigma0(SA, CT) # Potential density from Absolute Salinity g/Kg Conservative temperature deg C.
series = df_oxy * 44660 / (s0 + 1000)
return series
def raw_ctd_filter(input_array=None, filter_type='triangle', win_size=24, parameters=None):
"""raw_ctd_filter function
Function takes NUMPY array
of raw ctd data and returns filtered data. This function also needs
one of three filter types (boxcar, gaussian, triangle) as well as
window size.
Args:
param1 (ndarray): Numpy ndarray with predefined header with at
param2 (str): One of three tested filter types
boxcar, gaussian_std, triangle.
default is triangle
param3 (int): A window size for the filter. Default is 24, which
is the number of frames per second from a SBE9+/11 CTD/Dech unit.
param4 (ndarray): parameters the dtype names used in filtering the
analytical inputs.
Returns:
Narray: The return value is a matrix of filtered ctd data with
the above listed header values.
"""
if input_array is None:
print("In raw_ctd_filter: No data array.")
return
else:
return_array = input_array
if parameters is None:
print("In raw_ctd_filter: Empty parameter list.")
else:
for p in parameters:
if filter_type == 'boxcar':
win = sig.boxcar(win_size)
return_array[str(p)] = sig.convolve(input_array[str(p)], win, mode='same')/len(win)
elif filter_type == 'gaussian':
sigma = np.std(input_array[str(p)])
win = sig.general_gaussian(win_size, 1.0, sigma)
return_array[str(p)] = sig.convolve(input_array[str(p)], win, mode='same')/(len(win))
elif filter_type == 'triangle':
win = sig.triang(win_size)
return_array[p] = 2*sig.convolve(input_array[p], win, mode='same')/len(win)
return return_array
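# --- Hedged usage sketch for raw_ctd_filter() ----------------------------------
# Smooths a few analytical channels with the default 24-frame triangle window;
# the parameter names listed here are assumptions about the converted data.
def _example_raw_ctd_filter(raw_mat):
    return raw_ctd_filter(input_array=raw_mat, filter_type='triangle',
                          win_size=24,
                          parameters=['CTDPRS', 'CTDTMP1', 'CTDCOND1'])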
def ondeck_pressure(stacast, p_col, c1_col, c2_col, time_col, inMat=None, conductivity_startup=20.0, log_file=None):
"""ondeck_pressure function
Function takes full NUMPY ndarray with predefined dtype array
of filtered raw ctd data, then stores, analyzes and removes on-deck
values from the data.
Args:
param1 (str): stacast, station cast info
param1 (str): p_col, pressure data column name
param2 (str): c1_col, cond1 data column name
param3 (str): c2_col, cond2 data column name
param4 (str): time_col, time data column name
param5 (ndarray): numpy ndarray with dtype array
param6 (float): conductivity_startup, threshold value
param7 (str): log_file, log file name
Returns:
Narray: The return ndarray with ondeck data removed.
Also output start/end ondeck pressure.
"""
start_pressure = []
tmpMat = []
outMat = []
tmp = 0
start_p = 0.0
n = 0
ep = []
end_p = 0.0
# Frequency
fl = 24
fl2 = fl*2
# One minute
mt = 60
# Half minute
ms = 30
time_delay = fl*ms
if inMat is None:
print("Ondeck_pressure function: No data.")
return
else:
# Searches first quarter of matrix, uses conductivity
# threshold min to capture startup pressure
for j in range(0,int(len(inMat)/4)):
if ((inMat[c1_col][j] < conductivity_startup) and (inMat[c2_col][j] < conductivity_startup)):
tmp = j
start_pressure.append(inMat[p_col][j])
# Evaluate starting pressures
if not start_pressure: start_p = "Started in Water"
else:
n = len(start_pressure)
if (n > time_delay): start_p = np.average(start_pressure[fl2:n-(time_delay)])
else: start_p = np.average(start_pressure[fl2:n])
# Remove on-deck startup
inMat = inMat[tmp:]
tmp = len(inMat);
# Searches last half of NDarray for conductivity threshold
if len(inMat) % 2 == 0:
inMat_2 = inMat.copy()
else:
inMat_2 = inMat.iloc[1:].copy()
inMat_half1, inMat_half2 = np.split(inMat_2,2)
ep = inMat_half2[(inMat_half2[c1_col] < conductivity_startup) & (inMat_half2[c2_col] < conductivity_startup)][p_col]
# for j in range(int(len(inMat)*0.5), len(inMat)):
# if ((inMat[c1_col][j] < conductivity_startup) and (inMat[c2_col][j] < conductivity_startup)):
# ep.append(inMat[p_col][j])
# if (tmp > j): tmp = j
# Evaluate ending pressures
if (len(ep) > (time_delay)): end_p = np.average(ep[(time_delay):])
else: end_p = np.average(ep[(len(ep)):])
# Remove on-deck ending
outMat = inMat[:tmp]
# Store ending on-deck pressure
report_ctd.report_pressure_details(stacast, log_file, start_p, end_p)
return outMat
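# --- Hedged usage sketch for ondeck_pressure() ----------------------------------
# The station/cast id, column labels and log path are illustrative assumptions.
def _example_ondeck_pressure(raw_mat):
    return ondeck_pressure('00101', 'CTDPRS', 'CTDCOND1', 'CTDCOND2',
                           'scan_datetime', inMat=raw_mat,
                           conductivity_startup=20.0,
                           log_file='logs/ondeck_pressure.csv')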
def _roll_filter(df, pressure_column="CTDPRS", direction="down"):
#fix/remove try/except once serialization is fixed
try:
if direction == 'down':
monotonic_sequence = df[pressure_column].expanding().max()
elif direction == 'up':
monotonic_sequence = df[pressure_column].expanding().min()
else:
raise ValueError("direction must be one of (up, down)")
except KeyError:
pressure_column = 'CTDPRS'
if direction == 'down':
monotonic_sequence = df[pressure_column].expanding().max()
elif direction == 'up':
monotonic_sequence = df[pressure_column].expanding().min()
else:
raise ValueError("direction must be one of (up, down)")
return df[df[pressure_column] == monotonic_sequence]
def roll_filter(inMat, p_col, up='down', frames_per_sec=24, search_time=15, **kwargs):
"""roll_filter function
Function takes full NUMPY ndarray with predefined dtype array
and subsample arguments to return a roll filtered ndarray.
Args:
param1 (str): stacast, station cast info
param2 (ndarray): inMat, numpy ndarray with dtype array
param3 (str): up, direction to filter cast (up vs down)
param4 (int): frames_per_sec, subsample selection rate
param5 (int): search_time, search time past pressure inversion
Returns:
Narray: The return value ndarray of data with ship roll removed
"""
#When the "pressure sequence" code is fixed, uncomment and use this instead
start = kwargs.get("start", 0)
end = kwargs.get("end", -1)
full_matrix = kwargs.get("full_matrix", inMat)
tmp_df = pd.DataFrame.from_records(full_matrix[start:end])
tmp_df = _roll_filter(tmp_df)
#return tmp_df.to_records(index=False)
return tmp_df
remove = []
frequency = 24 # Hz of package
if (frames_per_sec > 0) & (frames_per_sec <= 24):
sample = int(frequency/frames_per_sec) # establish subsample rate to time ratio
else: sample = frequency
# Adjusted search time with subsample rate
search_time = int(sample*frequency*int(search_time))
if inMat is None:
print("Roll filter function: No input data.")
return
else:
P = inMat[p_col]
dP = np.diff(P,1)
if up == 'down':
index_to_remove = np.where(dP < 0)[0] # Differential filter
subMat = np.delete(inMat, index_to_remove, axis=0)
P = subMat[p_col]
tmp = np.array([])
for i in range(0,len(P)-1):
if P[i] > P[i+1]:
deltaP = P[i+1] + abs(P[i] - P[i+1])
# Remove aliasing
k = np.where(P == min(P[i+1:i+search_time], key=lambda x:abs(x-deltaP)))[0]
tmp = np.arange(i+1,k[0]+1,1)
remove = np.append(remove,tmp)
deltaP = 0
elif up == 'up':
index_to_remove = np.where(dP > 0)[0] # Differential filter
subMat = np.delete(inMat, index_to_remove, axis=0)
P = subMat[p_col]
tmp = np.array([])
for i in range(0,len(P)-1):
if P[i] < P[i+1]:
deltaP = P[i+1] - abs(P[i] - P[i+1])
# Remove aliasing
k = np.where(P == min(P[i+1:i+search_time], key=lambda x:abs(x-deltaP)))[0]
tmp = np.arange(i+1,k[0]+1,1)
remove = np.append(remove,tmp)
deltaP = 0
subMat = np.delete(subMat,remove,axis=0)
return subMat
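# --- Hedged note on the path actually taken above --------------------------------
# Because of the early return, roll_filter() currently reduces to the
# monotonic-pressure filter in _roll_filter(); a minimal equivalent call is:
def _example_roll_filter(df_cast):
    # keep only scans where pressure is still increasing (downcast)
    return _roll_filter(df_cast, pressure_column='CTDPRS', direction='down')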
def pressure_sequence(df, p_col='CTDPRS', intP=2.0, startT=-1.0, startP=0.0, up='down', sample_rate=12, search_time=15):
"""pressure_sequence function
Function takes a dataframe and several arguments to return a pressure
sequenced data ndarray.
Pressure sequencing includes rollfilter.
Necessary inputs are input Matrix (inMat) and pressure interval (intP).
The other inputs have default settings. The program will figure out
specifics for those settings if left blank.
Start time (startT), start pressure (startP) and up are mutually exclusive.
If sensors are not fully functional when the ctd starts the downcast, the
analyst can select a later start time or start pressure but not both.
There is no interpolation to the surface for other sensor values.
'up' indicates direction for pressure sequence. If up is set startT and startP
are void.
Args:
param1 (Dataframe: Dataframe containing measurement data
param2 (str): p_col, pressure column name
param3 (float): starting pressure interval
param5 (float): start time (startT) for pressure sequence
param6 (float): start pressure (startP) for pressure sequence
param7 (str): pressure sequence direction (down/up)
param8 (int): sample_rate, sub sample rate for roll_filter. Cleans & speeds processing.
param9 (int): search_time, truncate search index for the aliasing part of ship roll.
param10 (ndarray): inMat, input data ndarray
Returns:
Narray: The return value is a matrix of pressure sequenced data
todo: deep data bin interpolation to manage empty slices
"""
# change to take dataframe with the following properties
# * in water data only (no need to find cast start/end)
# * The full down and up time series (not already split since this method will do it)
# New "algorithm" (TODO spell this right)
# * if direction is "down", use the input as is
# * if direction is "up", invert the row order of the input dataframe
# Use the "roll filter" method to get only the rows to be binned
# * the roll filter will treat the "up" part of the cast as a giant roll to be filtered out
# * the reversed dataframe will ensure we get the "up" or "down" part of the cast
# * there is no need to reverse the dataframe again as the pressure binning process will remove any "order" information (it doesn't care about the order)
# That's basically all I (barna) have so far TODO Binning, etc...
# pandas.cut() to do binning
#lenP, prvPrs not used
# Passed Time-Series, Create Pressure Series
start = 0
# Roll Filter
roll_filter_matrix = roll_filter(df, p_col, up, sample_rate, search_time, start=start)
df_roll_surface = fill_surface_data(roll_filter_matrix, bin_size=2)
#bin_size should be moved into config
binned_df = binning_df(df_roll_surface, bin_size=2)
binned_df = binned_df.reset_index(drop=True)
return binned_df
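# --- Hedged end-to-end sketch of the binning pipeline above ------------------------
# roll filter -> surface fill -> 2 dbar bin averages, as described in the
# pressure_sequence() docstring. df_cast is assumed to be in-water data only.
def _example_pressure_sequence(df_cast):
    return pressure_sequence(df_cast, p_col='CTDPRS', intP=2.0, up='down')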
def binning_df(df, **kwargs):
'''Bins records according to bin_size, then finds the mean of each bin and returns a df.
'''
bin_size = kwargs.get("bin_size", 2)
try:
labels_in = [x for x in range(0,int(np.ceil(df['CTDPRS_DBAR'].max())),2)]
df['bins'] = pd.cut(df['CTDPRS_DBAR'], range(0,int(np.ceil(df['CTDPRS_DBAR'].max()))+bin_size,bin_size), right=False, include_lowest=True, labels=labels_in)
df['CTDPRS_DBAR'] = df['bins'].astype('float64')
df_out = df.groupby('bins').mean()
return df_out
except KeyError:
labels_in = [x for x in range(0,int(np.ceil(df['CTDPRS'].max())),2)]
df['bins'] = pd.cut(df['CTDPRS'], range(0,int(np.ceil(df['CTDPRS'].max()))+bin_size,bin_size), right=False, include_lowest=True, labels=labels_in)
df['CTDPRS'] = df['bins'].astype('float64')
df_out = df.groupby('bins').mean()
return df_out
def fill_surface_data(df, **kwargs):
'''Copy the first scan from the top of the cast and propagate it up to the surface.
'''
surface_values = []
bin_size = kwargs.get("bin_size", 2)
try:
for x in range(1, int(np.floor(df.iloc[0]['CTDPRS_DBAR'])), bin_size):
surface_values.append(x)
df_surface = pd.DataFrame({'CTDPRS_DBAR': surface_values})
df_surface['interp_bol'] = 1
df_merged = pd.merge(df_surface, df, on='CTDPRS_DBAR', how='outer')
import pandas as pd
import numpy as np
class PreProcessing:
data = None
quarter_names = None
num_years = None
num_days = None
def __init__(self, name):
name= str(name)
self.get_data(name)
self.data['Normalized_Close'] = self.normalized_data_col(self.data)
self.data['Quarter'] = self.get_quarter_col(self.data)
self.num_days = 252
self.prices_by_year = self.get_prices_by_year()
self.quarter_length = int(self.num_days / 4)
def get_prices_by_year(self):
df = self.modify_first_year_data()
for i in range(1, len(self.num_years)):
df = pd.concat([df, pd.DataFrame(self.get_year_data(year=self.num_years[i], normalized=True))], axis=1)
df = df[:self.num_days]
quarter_col = []
num_days_in_quarter = self.num_days // 4
for j in range(0, len(self.quarter_names)):
quarter_col.extend([self.quarter_names[j]]*num_days_in_quarter)
quarter_col = pd.DataFrame(quarter_col)
df = pd.concat([df, quarter_col], axis=1)
from world_viewer.world import World
import pandas as pd
import numpy as np
import warnings
# from sensible_raw.loaders import loader
import json
from math import ceil
import os
os.environ['R_HOME'] = '/home/<EMAIL>/master/lib/R'
class CNSWorld(World):
PICKLE_PATH = './pickle/' # path for cached data
RELATION_NET_PICKLE = 'CNS_relation_net'
OPINIONS_PICKLE = 'CNS_opinions'
LIKE_MINDEDNESS_PICKLE = 'CNS_like_mindedness'
CNS_TIME_BEGIN = pd.Timestamp(pd.datetime(2013, 9, 2)) # first timestamp
CNS_TIME_END = pd.Timestamp(pd.datetime(2014, 12, 31)) # last timestamp
sigma = pd.to_timedelta(3, unit='d').total_seconds()
two_sigma_sqr = 2* sigma * sigma
def __init__(self, path='', start=pd.datetime(2013, 9, 2), end=pd.datetime(2014, 12, 31)):
super().__init__()
self.path = path
self.CNS_TIME_BEGIN = start
self.CNS_TIME_END = end
def load_world(self, opinions = ['smoking'], relation_agg = 2, read_cached = False, stop=False, write_pickle = True, continous_op = False):
self.name = "CNS" + '-'.join(opinions)
self.type = "CNS"
if continous_op:
warnings.warn("No comparison of continous opinions implementet yet!")
pickle_relation_net_filename = self.RELATION_NET_PICKLE \
+ "_" + str(relation_agg) \
+ ".pkl"
pickle_opinions_filename = self.OPINIONS_PICKLE \
+ "_" + '-'.join(opinions) \
+ ".pkl"
pickle_like_mindedness_filename = self.LIKE_MINDEDNESS_PICKLE \
+ "_" + '-'.join(opinions) \
+ ".pkl"
## 0. Load time
#time = pd.DataFrame(pd.date_range(self.CNS_TIME_BEGIN, self.CNS_TIME_END, freq='W-MON'),columns=['time'])
time = pd.DataFrame(pd.date_range(self.CNS_TIME_BEGIN, self.CNS_TIME_END, freq='d'),columns=['time'])
self.time = time
## 1. Load opinions
if read_cached:
opinions_cached = False
try:
op_nodes = pd.read_pickle(self.PICKLE_PATH + pickle_opinions_filename)
opinions_cached = True
except FileNotFoundError:
warnings.warn("No cached opinions found, read opinions from file.")
opinions_cached = False
if not (read_cached and opinions_cached):
op_nodes = pd.DataFrame() # general opinion dataframe
if len(list(set(opinions) & set(["smoking","physical"]))) > 0:
op_data = pd.DataFrame() # df for loaded data
# load data
for survey in np.arange(1,4):
print('Load survey ' + str(survey))
data_s = loader.load_data("questionnaires", "survey_"+str(survey), as_dataframe=True)
data_s = data_s[data_s.user < 1000] #clean strange users
op_time = self._get_op_time(survey)
data_s = data_s.set_index('user').join(op_time)
data_s = data_s[data_s.time.astype('int') > 10]
data_s[data_s.time < self.CNS_TIME_BEGIN] = self.CNS_TIME_BEGIN
data_s[data_s.time > self.CNS_TIME_END] = self.CNS_TIME_END
data_s['survey'] = survey
data_s.reset_index(inplace=True)
op_data = pd.concat([op_data,data_s],sort=False)
#possibility that users filled out more than one questionnaire in one week
op_data.drop_duplicates(['user','time','variable_name'], keep='last', inplace=True)
# process opinions
for opinion in opinions:
# load smoking opinions
if opinion == "smoking":
print("Process opinion data for variable: smoking")
opinion = "op_" + opinion
smoking = op_data[op_data.variable_name == b'smoke_freq'].copy()
smoking[opinion] = (smoking.response != b'nej_jeg_har_aldrig_r') \
& (smoking.response != b'nej_men_jeg_har_rget')
smoking.reset_index(inplace=True)
smoking = smoking[['user', 'time', opinion, 'survey' ]]
smoking.rename(columns={'user':'node_id'},inplace=True)
smoking = self._add_time_to_op_nodes(smoking, time, opinion)
# write into general dataframe
if op_nodes.empty:
op_nodes = smoking
else:
op_nodes = op_nodes.set_index(['node_id','time']).join(smoking.set_index(['node_id','time']), how='outer')
op_nodes.reset_index(inplace=True)
# load physical opinions
elif opinion == "physical":
print("Process opinion data for variable: physical")
opinion = "op_" + opinion
physical = op_data[op_data.variable_name == b'physical_activity'].copy()
physical.response.replace(b'ingen',0,inplace=True)
physical.response.replace(b'ca__time_om_ugen',0,inplace=True)
physical.response.replace(b'ca_1-2_timer_om_ugen',1,inplace=True)
physical.response.replace(b'ca_3-4_timer_om_ugen',2,inplace=True)
physical.response.replace(b'ca_5-6_timer_om_ugen',3,inplace=True)
physical.response.replace(b'7_timer_om_ugen_elle',4,inplace=True)
physical.rename(columns={'response':opinion, 'user':'node_id'},inplace=True)
physical = physical[['node_id', 'time', opinion, 'survey' ]]
physical = self._add_time_to_op_nodes(physical, time, opinion)
# write into general dataframe
if op_nodes.empty:
op_nodes = physical
else:
op_nodes = op_nodes.set_index(['node_id','time','survey']) \
.join(physical.set_index(['node_id','time','survey']), how='outer')
op_nodes.reset_index(inplace=True)
elif opinion == "fitness":
print("Process opinion data for variable: fitness")
opinion = "op_" + opinion
fitness = pd.read_pickle('data/op_fitness.pkl').reset_index()
fitness = fitness[['node_id','time','op_fitness_abs']]
fitness = fitness.rename(columns={"op_fitness_abs":"fitness"})
fitness["op_fitness"] = 0
fitness.sort_values(['node_id', 'time'], inplace=True)
fitness = fitness[fitness.time >= self.CNS_TIME_BEGIN]
fitness = fitness[fitness.time <= self.CNS_TIME_END]
fitness.set_index('node_id', inplace=True)
fitness.reset_index(inplace=True)
# discretize opinion
fitness.loc[fitness.fitness >= 1, "op_fitness"] = True
fitness.loc[fitness.fitness < 1, "op_fitness"] = False
# write into general dataframe
if op_nodes.empty:
op_nodes = fitness
else:
op_nodes = op_nodes.set_index(['node_id','time','survey']) \
.join(fitness.set_index(['node_id','time','survey']), how='outer')
op_nodes.reset_index(inplace=True)
else:
raise ValueError('The opinion "' + opinion + '" is unknown.')
if write_pickle: op_nodes.to_pickle(self.PICKLE_PATH + pickle_opinions_filename)
#save opinions as instance variable
self.op_nodes = op_nodes
if stop: return 0
## 3. Load relation network
relations = pd.read_pickle("data/relations.pkl")
relations.reset_index(inplace=True)
relations = relations[relations.time >= self.CNS_TIME_BEGIN]
relations = relations[relations.time <= self.CNS_TIME_END]
# take only nodes for which the opinion is known
relations = relations[relations.id_A.isin(self.op_nodes.node_id)]
relations = relations[relations.id_B.isin(self.op_nodes.node_id)]
self.a_ij = relations[['id_A', 'id_B', 'time', 'edge']]
def _get_op_time(self, survey):
with open('user_scores'+str(survey)+'.json') as f:
op_time = json.load(f)
op_time = pd.DataFrame(op_time).loc['ts'].to_frame()
op_time.index.name = 'user'
op_time.reset_index(inplace=True)
op_time.user = op_time.user.astype('int')
op_time.set_index('user',inplace=True)
op_time.rename(columns={'ts':'time'},inplace=True)
op_time.time = pd.to_datetime(op_time.time, unit='s').dt.to_period('W').dt.to_timestamp()
return op_time
def load_edges_from_bluetooth2(self, proxi, time, verbose=True): #, threshold = None, verbose=True):
proxi = proxi.copy()
# take both directions id_A->id_B, id_B->id_A
proxi_inv = proxi.rename(columns={'id_A':'id_B','id_B':'id_A'})
proxi = pd.concat([proxi, proxi_inv], sort=False)
proxi.drop_duplicates(['id_A','id_B','time'],inplace=True)
# dont count edges twice
proxi = proxi[proxi.id_A < proxi.id_B]
proxi.time = proxi.time.dt.round('D')
# count encounters per day
proxi['encounter'] = 1
proxi = proxi.groupby(['id_A','id_B','time']).encounter.sum().reset_index()
print("before")
print(proxi)
#insert time steps with no recorded encounter
proxi = proxi.groupby(['id_A','id_B'])[['time','encounter']] \
.apply( lambda p: \
pd.DataFrame(p).set_index(['time']).join(time.set_index(['time']), how='outer') \
)
proxi.reset_index(inplace=True)
# fill unknown encounters with 0
proxi.fillna(0,inplace=True)
print("after")
print(proxi)
# weighted sum over a week
proxi = proxi.groupby(['id_A','id_B'])['time','encounter'].apply(self._calc_interaction)
proxi.reset_index(inplace=True)
proxi.time = pd.to_datetime(proxi.time, unit='s')
import pandas as pd
import numpy as np
import nltk
from nltk.corpus import stopwords
russian_stop = set(stopwords.words('russian'))
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.decomposition import TruncatedSVD
class LSA_app():
def __init__(self, conf='title', *args, **kwargs):
self.vec = TfidfVectorizer(ngram_range=(1,2), stop_words=russian_stop, *args, **kwargs)
self.conf = conf
def get_tfidf(self, train_df, test_df):
train_df[self.conf].fillna('NA', inplace=True)
test_df[self.conf].fillna('NA', inplace=True)
self.full_tfidf = self.vec.fit_transform(train_df[self.conf].values.tolist() + test_df[self.conf].values.tolist())
def get_df_tfidf(self, df):
return self.vec.transform(df[self.conf].values.tolist())
def apply_svd(self, n=100, *args, **kwargs):
self.svd_obj = TruncatedSVD(n_components=n)
self.svd_obj.fit(self.full_tfidf)
def get_svd(self, df, tfidf):
df_svd = pd.DataFrame(self.svd_obj.transform(tfidf))
df_svd.columns = ['SVD_'+self.conf+'_'+str(i+1) for i in range(self.svd_obj.n_components)]
return pd.concat([df, df_svd], axis=1)
import matplotlib
matplotlib.use('Agg')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('seaborn-poster')
from datetime import datetime as dt
from datetime import timedelta
import glob
from scipy.stats import gamma
import sys
sys.path.insert(0,'model') # I hate this too but it allows everything to use the same helper functions.
from helper_functions import read_in_NNDSS
#Code taken from read_in_cases from Reff_functions. Preprocessing was not helpful for this situation.
def read_cases_lambda(case_file_date):
"""
Read in NNDSS data
"""
df_NNDSS = read_in_NNDSS(case_file_date)
df_interim = df_NNDSS[['date_inferred','STATE','imported','local']]
return(df_interim)
def tidy_cases_lambda(interim_data, remove_territories=True):
#Remove non-existent notification dates
interim_data = interim_data[~np.isnat(interim_data.date_inferred)]
#Filter out territories
    if(remove_territories):
        df_linel = interim_data[(interim_data['STATE']!='NT') & (interim_data['STATE']!='ACT')]
    else:
        df_linel = interim_data
#Melt down so that imported and local are no longer columns. Allows multiple draws for infection date.
#i.e. create linelist data
df_linel = df_linel.melt(id_vars = ['date_inferred','STATE'], var_name = 'SOURCE',value_name='n_cases')
#Reset index or the joining doesn't work
df_linel = df_linel[df_linel.n_cases!=0]
df_linel = df_linel.reset_index(drop=True)
return(df_linel)
##gamma draws take arguments (shape, scale)
def draw_inf_dates(df_linelist, shape_rd=2.77, scale_rd=3.17, offset_rd=0,
shape_inc=5.807, scale_inc=0.948, offset_inc=1,nreplicates=1):
notification_dates = df_linelist['date_inferred']
nsamples = notification_dates.shape[0]
# DEFINE DELAY DISTRIBUTION
# mean_rd = 5.47
# sd_rd = 4.04
#scale_rd = shape_rd/(scale_rd)**2
#shape_rd = shape_rd/scale_rd
# DEFINE INCUBATION PERIOD DISTRIBUTION
# Taken from Lauer et al 2020
# mean_inc = 5.5 days
# sd_inc = 1.52
#scale_inc = (scale_inc)**2/shape_inc #scale**2 = var / shape
#shape_inc =(scale_inc)**2/scale_inc**2
#Draw from distributions - these are long vectors
inc_period = offset_inc+np.random.gamma(shape_inc, scale_inc, size = (nsamples*nreplicates))
rep_delay = offset_rd+np.random.gamma(shape_rd, scale_rd, size = (nsamples*nreplicates))
#infection date is id_nd_diff days before notification date. This is also a long vector.
id_nd_diff = inc_period + rep_delay
#Minutes aren't included in df. Take the ceiling because the day runs from 0000 to 2359. This can still be a long vector.
whole_day_diff = np.ceil(id_nd_diff)
time_day_diffmat = whole_day_diff.astype('timedelta64[D]').reshape((nsamples, nreplicates))
#Vector must be coerced into a nsamples by nreplicates array. Then each column must be subtracted from notification_dates.
#Subtract days off of notification dates.
notification_mat = np.tile(notification_dates, (nreplicates,1)).T #notification_dates is repeated as a column nreplicates times.
infection_dates = notification_mat - time_day_diffmat
#Make infection dates into a dataframe
datecolnames = [*map(str,range(nreplicates))]
infdates_df = pd.DataFrame(infection_dates,columns = datecolnames)
#Uncomment this if theres errors
#print([df_linelist.shape, infdates_df.shape])
#Combine infection dates and original dataframe
df_inf = pd.concat([df_linelist, infdates_df], axis=1, verify_integrity=True)
return(df_inf)
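# --- Worked sketch (illustrative only, not part of the pipeline) ---
# Each notification date is pushed back by a draw of incubation period plus
# reporting delay (both Gamma-distributed), ceiled to whole days; with
# nreplicates=r the frame gains r extra columns named '0'..'r-1'. The two-row
# linelist below is an assumption used only to show the resulting shape.
def _sketch_draw_inf_dates():
    toy_linelist = pd.DataFrame({
        'date_inferred': pd.to_datetime(['2020-03-01', '2020-03-02']),
        'STATE': ['NSW', 'VIC'],
        'SOURCE': ['local', 'local'],
        'n_cases': [1, 2],
    })
    toy_inf = draw_inf_dates(toy_linelist, nreplicates=3)
    # ['date_inferred', 'STATE', 'SOURCE', 'n_cases', '0', '1', '2']
    print(toy_inf.columns.tolist())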
def index_by_infection_date(infections_wide):
datecolnames = [*infections_wide.columns[4:]]
df_combined = infections_wide[['STATE','SOURCE',datecolnames[0],'n_cases']].groupby(['STATE', datecolnames[0],'SOURCE']).sum()
#For each column (cn=column number): concatenate each sample as a column.
for cn in range(1,len(datecolnames)):
df_addin = infections_wide[['STATE','SOURCE',datecolnames[cn],'n_cases']].groupby(['STATE', datecolnames[cn],'SOURCE']).sum()
df_combined = pd.concat([df_combined,df_addin], axis=1, ignore_index = True)
#NaNs are inserted for missing values when concatenating. If it's missing, there were zero infections
df_combined[np.isnan(df_combined)]=0
#Rename the index.
df_combined.index.set_names(["STATE","INFECTION_DATE","SOURCE"], inplace=True)
#return(df_combined)
##INCLUDE ALL DAYS WITH ZERO INFECTIONS IN THE INDEX AS WELL.
# Reindex to include days with zero total infections.
local_infs = df_combined.xs('local',level='SOURCE')
imported_infs = df_combined.xs('imported',level='SOURCE')
statelist = [*df_combined.index.get_level_values('STATE').unique()]
#Should all states have the same start date? Current code starts from the first case in each state.
#For the same start date:
local_statedict = dict(zip(statelist, np.repeat(None, len(statelist))))
imported_statedict = dict(zip(statelist, np.repeat(None, len(statelist))))
#Determine start date as the first infection date for all.
#start_date = np.datetime64("2020-02-01")
start_date = df_combined.index.get_level_values('INFECTION_DATE').min()
#Determine end dates as the last infected date by state.
index_only = df_combined.index.to_frame()
index_only = index_only.reset_index(drop=True)
maxdates = index_only['INFECTION_DATE'].max()
for aus_state in statelist:
state_data = local_infs.xs(aus_state, level='STATE')
#start_date = state_data.index.min()
#dftest.index=dftest.reindex(alldates, fill_value=0)
alldates = pd.date_range(start_date, maxdates) #All days from start_date to the last infection day.
local_statedict[aus_state] = state_data.reindex(alldates, fill_value=0)
for aus_state in statelist:
state_data = imported_infs.xs(aus_state, level='STATE')
alldates = pd.date_range(start_date, maxdates)
imported_statedict[aus_state] = state_data.reindex(alldates, fill_value=0)
#Convert dictionaries to data frames
df_local_inc_zeros = pd.concat(local_statedict)
df_local_inc_zeros['SOURCE']='local'
df_imp_inc_zeros = pd.concat(imported_statedict)
df_imp_inc_zeros['SOURCE']='imported'
#Merge dataframes and reindex.
df_inc_zeros = pd.concat([df_local_inc_zeros, df_imp_inc_zeros])
df_inc_zeros = df_inc_zeros.reset_index()
df_inc_zeros= df_inc_zeros.groupby(['level_0',"level_1","SOURCE"]).sum()
df_inc_zeros.index = df_inc_zeros.index.rename(['STATE','INFECTION_DATE',"SOURCE"])
return(df_inc_zeros)
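# --- Shape note (illustrative only) ---
# index_by_infection_date returns a frame indexed by (STATE, INFECTION_DATE,
# SOURCE) with one column per replicate draw; every state is reindexed onto the
# same date range, so days with no sampled infections appear as explicit zeros.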
def generate_lambda(infection_dates, shape_gen=3.64/3.07, scale_gen=3.07,
trunc_days=21,shift=0, offset=1):
"""
Given array of infection_dates (N_dates by N_samples), where values are possible
    number of cases infected on this day, generate the force of infection Lambda_t,
    an (N_dates - trunc_days + 1) by N_samples array.
Default generation interval parameters taken from Ganyani et al 2020.
"""
from scipy.stats import gamma
#scale_gen = mean_gen/(sd_gen)**2
#shape_gen = mean_gen/scale_gen
xmids = [x+shift for x in range(trunc_days+1)] #Find midpoints for discretisation
gamma_vals = gamma.pdf(xmids, a=shape_gen, scale=scale_gen) #double check parameterisation of scipy
#renormalise the pdf
disc_gamma = gamma_vals/sum(gamma_vals)
ws = disc_gamma[:trunc_days]
#offset
ws[offset:] = disc_gamma[:trunc_days-offset]
ws[:offset] = 0
lambda_t = np.zeros(shape=(infection_dates.shape[0]-trunc_days+1, infection_dates.shape[1]))
for n in range(infection_dates.shape[1]):
lambda_t[:,n] = np.convolve(infection_dates[:,n], ws, mode='valid')
return lambda_t
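# --- Worked sketch (illustrative only, not part of the pipeline) ---
# generate_lambda convolves each sample column with the discretised
# generation-interval weights (mode='valid'), so T days of infections become
# T - trunc_days + 1 days of Lambda_t. The synthetic array below is an
# assumption made purely for illustration.
def _sketch_generate_lambda():
    toy_infections = np.ones(shape=(30, 2))               # 30 days, 2 posterior samples
    toy_lambda = generate_lambda(toy_infections)
    print(toy_infections.shape, '->', toy_lambda.shape)   # (30, 2) -> (10, 2)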
def lambda_all_states(df_infection, **kwargs):
"""
    Use generate_lambda on every state.
"""
statelist = [*df_infection.index.get_level_values('STATE').unique()]
lambda_dict ={}
for state in statelist:
df_total_infections = df_infection.groupby(['STATE','INFECTION_DATE']).agg(sum)
lambda_dict[state] = generate_lambda(
df_total_infections.loc[state].values,
**kwargs
)
return lambda_dict
def Reff_from_case(cases_by_infection, lamb, prior_a=1, prior_b=5, tau=7, samples=1000):
"""
Using Cori at al. 2013, given case incidence by date of infection, and the force
of infection \Lambda_t on day t, estimate the effective reproduction number at time
t with smoothing parameter \tau.
cases_by_infection: A T by N array, for T days and N samples
    lamb : A (T - 20) by N array of force-of-infection values (the output of generate_lambda)
"""
csum_incidence = np.cumsum(cases_by_infection, axis = 0)
#remove first few incidences to align with size of lambda
# Generation interval length 20
csum_incidence = csum_incidence[20:,:]
csum_lambda = np.cumsum(lamb, axis =0)
roll_sum_incidence = csum_incidence[tau:, :] - csum_incidence[:-tau, :]
roll_sum_lambda = csum_lambda[tau:,:] - csum_lambda[:-tau,:]
a = prior_a + roll_sum_incidence
b = 1/(1/prior_b + roll_sum_lambda)
R = np.random.gamma(a,b) #shape, scale
#Need to empty R when there is too few cases...
#Use array inputs to output to same size
#inputs are T-tau by N, output will be T-tau by N
#
return a,b, R
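# --- Worked sketch (illustrative only) ---
# The Cori et al. (2013) posterior draws R_t from Gamma(shape=a, scale=b): the
# shape grows with the cases observed over the last tau days and the scale
# shrinks with the accumulated force of infection. The toy arrays below are
# assumptions chosen only to show the expected shapes.
def _sketch_reff_from_case():
    T, N, tau = 40, 5, 7
    toy_cases = np.random.poisson(10, size=(T, N)).astype(float)
    toy_lambda = generate_lambda(toy_cases)          # shape (T - 20, N)
    a, b, R = Reff_from_case(toy_cases, toy_lambda, tau=tau)
    print(R.shape)                                   # (T - 20 - tau, N) = (13, 5)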
def generate_summary(samples, dates_by='rows'):
"""
Given an array of samples (T by N) where rows index the dates,
generate summary statistics and quantiles
"""
if dates_by=='rows':
#quantiles of the columns
ax = 1
else:
#quantiles of the rows
ax = 0
mean = np.mean(samples, axis = ax)
bottom, lower, median, upper, top = np.quantile(samples,
(0.05, 0.25, 0.5, 0.75, 0.95),
axis =ax)
std = np.std(samples, axis = ax)
output = {
'mean':mean,
'std':std,
'bottom':bottom,
'lower':lower,
'median':median,
'upper':upper,
'top': top,
}
return output
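# --- Usage sketch (illustrative only) ---
# With dates on the rows, quantiles are taken across the columns, so every entry
# of the returned dict is a vector with one value per date. The random draws
# below are an assumption.
def _sketch_generate_summary():
    toy_R = np.random.gamma(2.0, 0.5, size=(13, 1000))    # 13 dates, 1000 samples
    summ = generate_summary(toy_R, dates_by='rows')
    print(len(summ['median']), sorted(summ.keys()))       # 13 values per statistic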
def plot_Reff(Reff:dict, dates=None, ax_arg=None, truncate=None, **kwargs):
"""
Given summary statistics of Reff as a dictionary, plot the distribution over time
"""
import matplotlib.pyplot as plt
plt.style.use('seaborn-poster')
from datetime import datetime as dt
if ax_arg is None:
fig, ax = plt.subplots(figsize=(12,9))
else:
fig, ax = ax_arg
color_cycle = ax._get_lines.prop_cycler
curr_color = next(color_cycle)['color']
if dates is None:
dates = range(len(Reff['mean']))
if truncate is None:
ax.plot(dates, Reff['mean'], color= curr_color, **kwargs)
ax.fill_between(dates, Reff['lower'],Reff['upper'], alpha=0.4, color = curr_color)
ax.fill_between(dates, Reff['bottom'],Reff['top'], alpha=0.4, color= curr_color)
else:
ax.plot(dates[truncate[0]:truncate[1]], Reff['mean'][truncate[0]:truncate[1]], color= curr_color, **kwargs)
ax.fill_between(dates[truncate[0]:truncate[1]], Reff['lower'][truncate[0]:truncate[1]],
Reff['upper'][truncate[0]:truncate[1]],
alpha=0.4, color = curr_color)
ax.fill_between(dates[truncate[0]:truncate[1]], Reff['bottom'][truncate[0]:truncate[1]],
Reff['top'][truncate[0]:truncate[1]],
alpha=0.4, color= curr_color)
#plt.legend()
#grid line at R_eff =1
ax.set_yticks([1],minor=True,)
ax.set_yticks([0,2,3],minor=False)
ax.set_yticklabels([0,2,3],minor=False)
ax.yaxis.grid(which='minor',linestyle='--',color='black',linewidth=2)
ax.tick_params(axis='x', rotation = 90)
return fig, ax
def plot_all_states(R_summ_states,df_interim, dates,
start='2020-03-01',end='2020-08-01',save=True, date =None, tau = 7,
nowcast_truncation=-10):
"""
Plot results over time for all jurisdictions.
dates: dictionary of (region, date) pairs where date holds the relevant
dates for plotting cases by inferred symptom-onset
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
states = df_interim.STATE.unique().tolist()
states.remove('NT')
states.remove('ACT')
date_filter = pd.date_range(start=start,end=end)
#prepare NNDSS cases
df_cases = df_interim.groupby(['date_inferred','STATE']).agg(sum)
df_cases = df_cases.reset_index()
fig, ax = plt.subplots(nrows=2, ncols=3,
sharex=True, sharey=True,
figsize=(15,12)
)
for i,state in enumerate(states):
row = i//3
col = i%3
R_summary = R_summ_states[state]
#a,b,R = Reff_from_case(df_state_I.values,lambda_state,prior_a=1, prior_b=2, tau=tau)
#R_summary = generate_summary(R)
fig, ax[row,col] = plot_Reff(R_summary,
dates=dates[state],
ax_arg=(fig, ax[row,col]),
truncate=(0,nowcast_truncation),
label='Our Model')
fig, ax[row,col] = plot_Reff(R_summary,
dates=dates[state],
ax_arg=(fig, ax[row,col]),
truncate=(nowcast_truncation,None),
label='Nowcast')
#plot formatting
ax[row,col].set_title(state)
ax[row,col].set_ylim((0,4))
        ax[row,col].set_xlim((pd.to_datetime(start), pd.to_datetime(end)))
"""Tests cleaning module
"""
import numpy as np
import pandas as pd
from dsutils.cleaning import remove_duplicate_cols
from dsutils.cleaning import remove_noninformative_cols
from dsutils.cleaning import categorical_to_int
def test_remove_duplicate_cols():
"""Tests cleaning.remove_duplicate_cols"""
# Should remove duplicate cols
    df = pd.DataFrame()
#%% [markdown]
# # Maggot connectome subset
#%%
import datetime
import logging
import time
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import pandas as pd
import pymaid
import seaborn as sns
from graspologic.plot import adjplot
from pkg.data import DATA_PATH
from pkg.io import glue as default_glue
from pkg.io import savefig
from pkg.plot import set_theme
from pkg.utils import ensure_connected, select_lateral_nodes
FILENAME = "process_maggot"
DISPLAY_FIGS = True
OUT_PATH = DATA_PATH / "processed_split"
def glue(name, var, **kwargs):
default_glue(name, var, FILENAME, **kwargs)
def gluefig(name, fig, **kwargs):
savefig(name, foldername=FILENAME, **kwargs)
glue(name, fig, figure=True)
if not DISPLAY_FIGS:
plt.close()
t0 = time.time()
set_theme()
rng = np.random.default_rng(8888)
#%% [markdown]
# ## Start Catmaid instance on Virtual Fly Brain
#%%
pymaid.CatmaidInstance("https://l1em.catmaid.virtualflybrain.org/", None)
logging.getLogger("pymaid").setLevel(logging.WARNING)
pymaid.clear_cache()
#%% [markdown]
# ## Load the "papers" meta-annotation
#%%
def get_indicator_from_annotation(annot_name, filt=None):
ids = pymaid.get_skids_by_annotation(annot_name.replace("*", "\*"))
if filt is not None:
name = filt(annot_name)
else:
name = annot_name
indicator = pd.Series(
index=ids, data=np.ones(len(ids), dtype=bool), name=name, dtype=bool
)
return indicator
annot_df = pymaid.get_annotated("papers")
series_ids = []
for annot_name in annot_df["name"]:
print(annot_name)
indicator = get_indicator_from_annotation(annot_name)
if annot_name == "Imambocus et al":
indicator.name = "Imambocus et al. 2022"
series_ids.append(indicator)
annotations = pd.concat(series_ids, axis=1, ignore_index=False)
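# Illustrative note: `annotations` is a skeleton-id by paper boolean indicator
# matrix; the outer join introduces NaN where a neuron is absent from a paper,
# so a dense True/False table could be obtained with annotations.fillna(False)
# if needed (not part of this excerpt).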
import streamlit as st # front end
import pymongo # database connection
from pymongo import MongoClient # accessing the database url
import pandas as pd # dataframe operations
import pdfplumber # visual debugging and data extraction
import PyPDF2 # scan the resume pdf
from rake_nltk import Rake # keyword extraction algorithm
import string # string operations
import io # convert binary resume file into a decoded file that is readable by python
import re # regular expression
import nltk
import pymongo
import certifi
import ssl
nltk.download('stopwords')
nltk.download('punkt')
import lxml
def keyphrases(file, min_word, max_word, num_phrases): # extract phrases from the text
text = file
text = text.lower()
    text = ''.join(s for s in text if ord(s) > 31 and ord(s) < 126)  # keep only printable ASCII characters
    text = re.sub(' +', ' ', text)  # collapse multiple spaces into a single space
    text = text.translate(str.maketrans('', '', string.punctuation))  # strip all punctuation
text = ''.join([i for i in text if not i.isdigit()])
    r = Rake(min_length=min_word, max_length=max_word)  # configure RAKE with the allowed phrase length range
    r.extract_keywords_from_text(text)  # extract ranked keyword phrases
phrases = r.get_ranked_phrases()
if num_phrases < len(phrases):
phrases = phrases[0:num_phrases]
return phrases
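# --- Usage sketch (illustrative only) ---
# keyphrases returns up to num_phrases RAKE-ranked phrases between min_word and
# max_word words long; the sample resume text below is an assumption.
#   keyphrases("Experienced data scientist skilled in machine learning and cloud deployment",
#              min_word=2, max_word=4, num_phrases=5)
#   -> e.g. ['experienced data scientist skilled', 'machine learning', ...]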
country = st.sidebar.text_input('Country') # sidebar interface and input
uploaded_file = st.file_uploader('Upload your resume') # to upload the function
file_text = ''
phrases = []
if uploaded_file is not None:
uploaded_file.seek(0)
file = uploaded_file.read()
pdf = PyPDF2.PdfFileReader(io.BytesIO(file)) # convert the binary coded file to a python readable file
for page in range(pdf.getNumPages()):
file_text += (pdf.getPage(page).extractText())
phrases.extend(keyphrases(file_text, 2, 4, 10)) # join all the phrases together
if len(phrases) > 0:
q_terms = st.multiselect('Select key phrases', options=phrases, default=phrases) # interface of the box
client = pymongo.MongoClient("mongodb+srv://GopalMengi:<EMAIL>/companies_sorted?retryWrites=true&w=majority")
def query(country,keywords):
result = client['companies_sorted']['Companies'].aggregate([
{
'$search': {
'text': {
'path': [
'industry'
],
'query': [
' %s' % (keywords)
],
'fuzzy': {
'maxEdits': 2,
'prefixLength': 2
}
}
}
}, {
'$project': {
'Name': '$name',
# 'URL': '$domain',
'Industry': '$industry',
# 'University': '$Uni',
'City': '$locality',
'Country': '$country',
'score': {
'$meta': 'searchScore'
}
}
}, {
'$match': {
'Country': '%s' % (country)
}
}, {
'$limit': 10
}
])
    df = pd.DataFrame(result)
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import operator
from itertools import product, starmap
from numpy import nan, inf
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, isnull, bdate_range,
NaT, date_range, timedelta_range,
_np_version_under1p8)
from pandas.tseries.index import Timestamp
from pandas.tseries.tdi import Timedelta
import pandas.core.nanops as nanops
from pandas.compat import range, zip
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from .common import TestData
class TestSeriesOperators(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_comparisons(self):
left = np.random.randn(10)
right = np.random.randn(10)
left[:3] = np.nan
result = nanops.nangt(left, right)
with np.errstate(invalid='ignore'):
expected = (left > right).astype('O')
expected[:3] = np.nan
assert_almost_equal(result, expected)
s = Series(['a', 'b', 'c'])
s2 = Series([False, True, False])
# it works!
exp = Series([False, False, False])
tm.assert_series_equal(s == s2, exp)
tm.assert_series_equal(s2 == s, exp)
def test_op_method(self):
def check(series, other, check_reverse=False):
simple_ops = ['add', 'sub', 'mul', 'floordiv', 'truediv', 'pow']
if not compat.PY3:
simple_ops.append('div')
for opname in simple_ops:
op = getattr(Series, opname)
                if opname == 'div':
alt = operator.truediv
else:
alt = getattr(operator, opname)
result = op(series, other)
expected = alt(series, other)
tm.assert_almost_equal(result, expected)
if check_reverse:
rop = getattr(Series, "r" + opname)
result = rop(series, other)
expected = alt(other, series)
tm.assert_almost_equal(result, expected)
check(self.ts, self.ts * 2)
check(self.ts, self.ts[::2])
check(self.ts, 5, check_reverse=True)
check(tm.makeFloatSeries(), tm.makeFloatSeries(), check_reverse=True)
def test_neg(self):
assert_series_equal(-self.series, -1 * self.series)
def test_invert(self):
assert_series_equal(-(self.series < 0), ~(self.series < 0))
def test_div(self):
with np.errstate(all='ignore'):
# no longer do integer div for any ops, but deal with the 0's
p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
result = p['first'] / p['second']
expected = Series(
p['first'].values.astype(float) / p['second'].values,
dtype='float64')
expected.iloc[0:3] = np.inf
assert_series_equal(result, expected)
result = p['first'] / 0
expected = Series(np.inf, index=p.index, name='first')
assert_series_equal(result, expected)
p = p.astype('float64')
result = p['first'] / p['second']
expected = Series(p['first'].values / p['second'].values)
assert_series_equal(result, expected)
p = DataFrame({'first': [3, 4, 5, 8], 'second': [1, 1, 1, 1]})
result = p['first'] / p['second']
assert_series_equal(result, p['first'].astype('float64'),
check_names=False)
self.assertTrue(result.name is None)
self.assertFalse(np.array_equal(result, p['second'] / p['first']))
# inf signing
s = Series([np.nan, 1., -1.])
result = s / 0
expected = Series([np.nan, np.inf, -np.inf])
assert_series_equal(result, expected)
# float/integer issue
# GH 7785
p = DataFrame({'first': (1, 0), 'second': (-0.01, -0.02)})
expected = Series([-0.01, -np.inf])
result = p['second'].div(p['first'])
assert_series_equal(result, expected, check_names=False)
result = p['second'] / p['first']
assert_series_equal(result, expected)
# GH 9144
s = Series([-1, 0, 1])
result = 0 / s
expected = Series([0.0, nan, 0.0])
assert_series_equal(result, expected)
result = s / 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
result = s // 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
def test_operators(self):
def _check_op(series, other, op, pos_only=False,
check_dtype=True):
left = np.abs(series) if pos_only else series
right = np.abs(other) if pos_only else other
cython_or_numpy = op(left, right)
python = left.combine(right, op)
tm.assert_series_equal(cython_or_numpy, python,
check_dtype=check_dtype)
def check(series, other):
simple_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'mod']
for opname in simple_ops:
_check_op(series, other, getattr(operator, opname))
_check_op(series, other, operator.pow, pos_only=True)
_check_op(series, other, lambda x, y: operator.add(y, x))
_check_op(series, other, lambda x, y: operator.sub(y, x))
_check_op(series, other, lambda x, y: operator.truediv(y, x))
_check_op(series, other, lambda x, y: operator.floordiv(y, x))
_check_op(series, other, lambda x, y: operator.mul(y, x))
_check_op(series, other, lambda x, y: operator.pow(y, x),
pos_only=True)
_check_op(series, other, lambda x, y: operator.mod(y, x))
check(self.ts, self.ts * 2)
check(self.ts, self.ts * 0)
check(self.ts, self.ts[::2])
check(self.ts, 5)
def check_comparators(series, other, check_dtype=True):
_check_op(series, other, operator.gt, check_dtype=check_dtype)
_check_op(series, other, operator.ge, check_dtype=check_dtype)
_check_op(series, other, operator.eq, check_dtype=check_dtype)
_check_op(series, other, operator.lt, check_dtype=check_dtype)
_check_op(series, other, operator.le, check_dtype=check_dtype)
check_comparators(self.ts, 5)
check_comparators(self.ts, self.ts + 1, check_dtype=False)
def test_operators_empty_int_corner(self):
s1 = Series([], [], dtype=np.int32)
s2 = Series({'x': 0.})
tm.assert_series_equal(s1 * s2, Series([np.nan], index=['x']))
def test_operators_timedelta64(self):
# invalid ops
self.assertRaises(Exception, self.objSeries.__add__, 1)
self.assertRaises(Exception, self.objSeries.__add__,
np.array(1, dtype=np.int64))
self.assertRaises(Exception, self.objSeries.__sub__, 1)
self.assertRaises(Exception, self.objSeries.__sub__,
np.array(1, dtype=np.int64))
        # series ops
v1 = date_range('2012-1-1', periods=3, freq='D')
v2 = date_range('2012-1-2', periods=3, freq='D')
rs = Series(v2) - Series(v1)
xp = Series(1e9 * 3600 * 24,
rs.index).astype('int64').astype('timedelta64[ns]')
assert_series_equal(rs, xp)
self.assertEqual(rs.dtype, 'timedelta64[ns]')
df = DataFrame(dict(A=v1))
td = Series([timedelta(days=i) for i in range(3)])
self.assertEqual(td.dtype, 'timedelta64[ns]')
# series on the rhs
result = df['A'] - df['A'].shift()
self.assertEqual(result.dtype, 'timedelta64[ns]')
result = df['A'] + td
self.assertEqual(result.dtype, 'M8[ns]')
# scalar Timestamp on rhs
maxa = df['A'].max()
tm.assertIsInstance(maxa, Timestamp)
resultb = df['A'] - df['A'].max()
self.assertEqual(resultb.dtype, 'timedelta64[ns]')
# timestamp on lhs
result = resultb + df['A']
values = [Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')]
expected = Series(values, name='A')
assert_series_equal(result, expected)
# datetimes on rhs
result = df['A'] - datetime(2001, 1, 1)
expected = Series(
[timedelta(days=4017 + i) for i in range(3)], name='A')
assert_series_equal(result, expected)
self.assertEqual(result.dtype, 'm8[ns]')
d = datetime(2001, 1, 1, 3, 4)
resulta = df['A'] - d
self.assertEqual(resulta.dtype, 'm8[ns]')
# roundtrip
resultb = resulta + d
assert_series_equal(df['A'], resultb)
# timedeltas on rhs
td = timedelta(days=1)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(resultb, df['A'])
self.assertEqual(resultb.dtype, 'M8[ns]')
# roundtrip
td = timedelta(minutes=5, seconds=3)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(df['A'], resultb)
self.assertEqual(resultb.dtype, 'M8[ns]')
# inplace
value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1))
rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1))
self.assertEqual(rs[2], value)
def test_operator_series_comparison_zerorank(self):
# GH 13006
result = np.float64(0) > pd.Series([1, 2, 3])
expected = 0.0 > pd.Series([1, 2, 3])
self.assert_series_equal(result, expected)
result = pd.Series([1, 2, 3]) < np.float64(0)
expected = pd.Series([1, 2, 3]) < 0.0
self.assert_series_equal(result, expected)
result = np.array([0, 1, 2])[0] > pd.Series([0, 1, 2])
expected = 0.0 > pd.Series([1, 2, 3])
self.assert_series_equal(result, expected)
def test_timedeltas_with_DateOffset(self):
# GH 4532
# operate with pd.offsets
s = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')])
result = s + pd.offsets.Second(5)
result2 = pd.offsets.Second(5) + s
expected = Series([Timestamp('20130101 9:01:05'), Timestamp(
'20130101 9:02:05')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s - pd.offsets.Second(5)
result2 = -pd.offsets.Second(5) + s
expected = Series([Timestamp('20130101 9:00:55'), Timestamp(
'20130101 9:01:55')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Milli(5)
result2 = pd.offsets.Milli(5) + s
expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(
'20130101 9:02:00.005')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)
expected = Series([Timestamp('20130101 9:06:00.005'), Timestamp(
'20130101 9:07:00.005')])
assert_series_equal(result, expected)
# operate with np.timedelta64 correctly
result = s + np.timedelta64(1, 's')
result2 = np.timedelta64(1, 's') + s
expected = Series([Timestamp('20130101 9:01:01'), Timestamp(
'20130101 9:02:01')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + np.timedelta64(5, 'ms')
result2 = np.timedelta64(5, 'ms') + s
expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(
'20130101 9:02:00.005')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
s + op(5)
op(5) + s
def test_timedelta_series_ops(self):
# GH11925
s = Series(timedelta_range('1 day', periods=3))
ts = Timestamp('2012-01-01')
expected = Series(date_range('2012-01-02', periods=3))
assert_series_equal(ts + s, expected)
assert_series_equal(s + ts, expected)
expected2 = Series(date_range('2011-12-31', periods=3, freq='-1D'))
assert_series_equal(ts - s, expected2)
assert_series_equal(ts + (-s), expected2)
def test_timedelta64_operations_with_DateOffset(self):
# GH 10699
td = Series([timedelta(minutes=5, seconds=3)] * 3)
result = td + pd.offsets.Minute(1)
expected = Series([timedelta(minutes=6, seconds=3)] * 3)
assert_series_equal(result, expected)
result = td - pd.offsets.Minute(1)
expected = Series([timedelta(minutes=4, seconds=3)] * 3)
assert_series_equal(result, expected)
result = td + Series([pd.offsets.Minute(1), pd.offsets.Second(3),
pd.offsets.Hour(2)])
expected = Series([timedelta(minutes=6, seconds=3), timedelta(
minutes=5, seconds=6), timedelta(hours=2, minutes=5, seconds=3)])
assert_series_equal(result, expected)
result = td + pd.offsets.Minute(1) + pd.offsets.Second(12)
expected = Series([timedelta(minutes=6, seconds=15)] * 3)
assert_series_equal(result, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
td + op(5)
op(5) + td
td - op(5)
op(5) - td
def test_timedelta64_operations_with_timedeltas(self):
# td operate with td
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td2 = timedelta(minutes=5, seconds=4)
result = td1 - td2
expected = Series([timedelta(seconds=0)] * 3) - Series([timedelta(
seconds=1)] * 3)
self.assertEqual(result.dtype, 'm8[ns]')
assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) - Series([timedelta(
seconds=0)] * 3))
assert_series_equal(result2, expected)
# roundtrip
assert_series_equal(result + td2, td1)
# Now again, using pd.to_timedelta, which should build
# a Series or a scalar, depending on input.
td1 = Series(pd.to_timedelta(['00:05:03'] * 3))
td2 = pd.to_timedelta('00:05:04')
result = td1 - td2
expected = Series([timedelta(seconds=0)] * 3) - Series([timedelta(
seconds=1)] * 3)
self.assertEqual(result.dtype, 'm8[ns]')
assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) - Series([timedelta(
seconds=0)] * 3))
assert_series_equal(result2, expected)
# roundtrip
assert_series_equal(result + td2, td1)
def test_timedelta64_operations_with_integers(self):
# GH 4521
# divide/multiply by integers
startdate = Series(date_range('2013-01-01', '2013-01-03'))
enddate = Series(date_range('2013-03-01', '2013-03-03'))
s1 = enddate - startdate
s1[2] = np.nan
s2 = Series([2, 3, 4])
expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 / s2
assert_series_equal(result, expected)
s2 = Series([20, 30, 40])
expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 / s2
assert_series_equal(result, expected)
result = s1 / 2
expected = Series(s1.values.astype(np.int64) / 2, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
s2 = Series([20, 30, 40])
expected = Series(s1.values.astype(np.int64) * s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 * s2
assert_series_equal(result, expected)
for dtype in ['int32', 'int16', 'uint32', 'uint64', 'uint32', 'uint16',
'uint8']:
s2 = Series([20, 30, 40], dtype=dtype)
expected = Series(
s1.values.astype(np.int64) * s2.astype(np.int64),
dtype='m8[ns]')
expected[2] = np.nan
result = s1 * s2
assert_series_equal(result, expected)
result = s1 * 2
expected = Series(s1.values.astype(np.int64) * 2, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
result = s1 * -1
expected = Series(s1.values.astype(np.int64) * -1, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
# invalid ops
assert_series_equal(s1 / s2.astype(float),
Series([Timedelta('2 days 22:48:00'), Timedelta(
'1 days 23:12:00'), Timedelta('NaT')]))
assert_series_equal(s1 / 2.0,
Series([Timedelta('29 days 12:00:00'), Timedelta(
'29 days 12:00:00'), Timedelta('NaT')]))
for op in ['__add__', '__sub__']:
sop = getattr(s1, op, None)
if sop is not None:
self.assertRaises(TypeError, sop, 1)
self.assertRaises(TypeError, sop, s2.values)
def test_timedelta64_conversions(self):
startdate = Series(date_range('2013-01-01', '2013-01-03'))
enddate = Series(date_range('2013-03-01', '2013-03-03'))
s1 = enddate - startdate
s1[2] = np.nan
for m in [1, 3, 10]:
for unit in ['D', 'h', 'm', 's', 'ms', 'us', 'ns']:
# op
expected = s1.apply(lambda x: x / np.timedelta64(m, unit))
result = s1 / np.timedelta64(m, unit)
assert_series_equal(result, expected)
if m == 1 and unit != 'ns':
# astype
result = s1.astype("timedelta64[{0}]".format(unit))
assert_series_equal(result, expected)
# reverse op
expected = s1.apply(
lambda x: Timedelta(np.timedelta64(m, unit)) / x)
result = np.timedelta64(m, unit) / s1
# astype
s = Series(date_range('20130101', periods=3))
result = s.astype(object)
self.assertIsInstance(result.iloc[0], datetime)
self.assertTrue(result.dtype == np.object_)
result = s1.astype(object)
self.assertIsInstance(result.iloc[0], timedelta)
self.assertTrue(result.dtype == np.object_)
def test_timedelta64_equal_timedelta_supported_ops(self):
ser = Series([Timestamp('20130301'), Timestamp('20130228 23:00:00'),
Timestamp('20130228 22:00:00'), Timestamp(
'20130228 21:00:00')])
intervals = 'D', 'h', 'm', 's', 'us'
# TODO: unused
# npy16_mappings = {'D': 24 * 60 * 60 * 1000000,
# 'h': 60 * 60 * 1000000,
# 'm': 60 * 1000000,
# 's': 1000000,
# 'us': 1}
def timedelta64(*args):
return sum(starmap(np.timedelta64, zip(args, intervals)))
for op, d, h, m, s, us in product([operator.add, operator.sub],
*([range(2)] * 5)):
nptd = timedelta64(d, h, m, s, us)
pytd = timedelta(days=d, hours=h, minutes=m, seconds=s,
microseconds=us)
lhs = op(ser, nptd)
rhs = op(ser, pytd)
try:
assert_series_equal(lhs, rhs)
except:
raise AssertionError(
"invalid comparsion [op->{0},d->{1},h->{2},m->{3},"
"s->{4},us->{5}]\n{6}\n{7}\n".format(op, d, h, m, s,
us, lhs, rhs))
def test_operators_datetimelike(self):
def run_ops(ops, get_ser, test_ser):
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
for op_str in ops:
op = getattr(get_ser, op_str, None)
with tm.assertRaisesRegexp(TypeError, 'operate'):
op(test_ser)
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td2 = timedelta(minutes=5, seconds=4)
ops = ['__mul__', '__floordiv__', '__pow__', '__rmul__',
'__rfloordiv__', '__rpow__']
run_ops(ops, td1, td2)
td1 + td2
td2 + td1
td1 - td2
td2 - td1
td1 / td2
td2 / td1
# ## datetime64 ###
dt1 = Series([Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')])
dt1.iloc[2] = np.nan
dt2 = Series([Timestamp('20111231'), Timestamp('20120102'),
Timestamp('20120104')])
ops = ['__add__', '__mul__', '__floordiv__', '__truediv__', '__div__',
'__pow__', '__radd__', '__rmul__', '__rfloordiv__',
'__rtruediv__', '__rdiv__', '__rpow__']
run_ops(ops, dt1, dt2)
dt1 - dt2
dt2 - dt1
# ## datetime64 with timetimedelta ###
ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__',
'__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__',
'__rpow__']
run_ops(ops, dt1, td1)
dt1 + td1
td1 + dt1
dt1 - td1
# TODO: Decide if this ought to work.
# td1 - dt1
# ## timetimedelta with datetime64 ###
ops = ['__sub__', '__mul__', '__floordiv__', '__truediv__', '__div__',
'__pow__', '__rmul__', '__rfloordiv__', '__rtruediv__',
'__rdiv__', '__rpow__']
run_ops(ops, td1, dt1)
td1 + dt1
dt1 + td1
# 8260, 10763
# datetime64 with tz
ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__',
'__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__',
'__rpow__']
tz = 'US/Eastern'
dt1 = Series(date_range('2000-01-01 09:00:00', periods=5,
tz=tz), name='foo')
dt2 = dt1.copy()
dt2.iloc[2] = np.nan
td1 = Series(timedelta_range('1 days 1 min', periods=5, freq='H'))
td2 = td1.copy()
td2.iloc[1] = np.nan
run_ops(ops, dt1, td1)
result = dt1 + td1[0]
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt2 + td2[0]
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
# odd numpy behavior with scalar timedeltas
if not _np_version_under1p8:
result = td1[0] + dt1
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = td2[0] + dt2
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt1 - td1[0]
exp = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
self.assertRaises(TypeError, lambda: td1[0] - dt1)
result = dt2 - td2[0]
exp = (dt2.dt.tz_localize(None) - td2[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
self.assertRaises(TypeError, lambda: td2[0] - dt2)
result = dt1 + td1
exp = (dt1.dt.tz_localize(None) + td1).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt2 + td2
exp = (dt2.dt.tz_localize(None) + td2).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt1 - td1
exp = (dt1.dt.tz_localize(None) - td1).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt2 - td2
exp = (dt2.dt.tz_localize(None) - td2).dt.tz_localize(tz)
assert_series_equal(result, exp)
self.assertRaises(TypeError, lambda: td1 - dt1)
self.assertRaises(TypeError, lambda: td2 - dt2)
def test_sub_single_tz(self):
# GH12290
s1 = Series([pd.Timestamp('2016-02-10', tz='America/Sao_Paulo')])
s2 = Series([pd.Timestamp('2016-02-08', tz='America/Sao_Paulo')])
result = s1 - s2
expected = Series([Timedelta('2days')])
assert_series_equal(result, expected)
result = s2 - s1
expected = Series([Timedelta('-2days')])
assert_series_equal(result, expected)
def test_ops_nat(self):
# GH 11349
timedelta_series = Series([NaT, Timedelta('1s')])
datetime_series = Series([NaT, Timestamp('19900315')])
nat_series_dtype_timedelta = Series(
[NaT, NaT], dtype='timedelta64[ns]')
nat_series_dtype_timestamp = Series([NaT, NaT], dtype='datetime64[ns]')
single_nat_dtype_datetime = Series([NaT], dtype='datetime64[ns]')
single_nat_dtype_timedelta = Series([NaT], dtype='timedelta64[ns]')
# subtraction
assert_series_equal(timedelta_series - NaT, nat_series_dtype_timedelta)
assert_series_equal(-NaT + timedelta_series,
nat_series_dtype_timedelta)
assert_series_equal(timedelta_series - single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(-single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
assert_series_equal(datetime_series - NaT, nat_series_dtype_timestamp)
assert_series_equal(-NaT + datetime_series, nat_series_dtype_timestamp)
assert_series_equal(datetime_series - single_nat_dtype_datetime,
nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
-single_nat_dtype_datetime + datetime_series
assert_series_equal(datetime_series - single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(-single_nat_dtype_timedelta + datetime_series,
nat_series_dtype_timestamp)
# without a Series wrapping the NaT, it is ambiguous
# whether it is a datetime64 or timedelta64
# defaults to interpreting it as timedelta64
assert_series_equal(nat_series_dtype_timestamp - NaT,
nat_series_dtype_timestamp)
assert_series_equal(-NaT + nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timestamp -
single_nat_dtype_datetime,
nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
-single_nat_dtype_datetime + nat_series_dtype_timestamp
assert_series_equal(nat_series_dtype_timestamp -
single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(-single_nat_dtype_timedelta +
nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
with tm.assertRaises(TypeError):
timedelta_series - single_nat_dtype_datetime
# addition
assert_series_equal(nat_series_dtype_timestamp + NaT,
nat_series_dtype_timestamp)
assert_series_equal(NaT + nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timestamp +
single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(timedelta_series + NaT, nat_series_dtype_timedelta)
assert_series_equal(NaT + timedelta_series, nat_series_dtype_timedelta)
assert_series_equal(timedelta_series + single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timestamp + NaT,
nat_series_dtype_timestamp)
assert_series_equal(NaT + nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timestamp +
single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_datetime,
nat_series_dtype_timestamp)
assert_series_equal(single_nat_dtype_datetime +
nat_series_dtype_timedelta,
nat_series_dtype_timestamp)
# multiplication
assert_series_equal(nat_series_dtype_timedelta * 1.0,
nat_series_dtype_timedelta)
assert_series_equal(1.0 * nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(timedelta_series * 1, timedelta_series)
assert_series_equal(1 * timedelta_series, timedelta_series)
assert_series_equal(timedelta_series * 1.5,
Series([NaT, Timedelta('1.5s')]))
assert_series_equal(1.5 * timedelta_series,
Series([NaT, Timedelta('1.5s')]))
assert_series_equal(timedelta_series * nan, nat_series_dtype_timedelta)
assert_series_equal(nan * timedelta_series, nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
datetime_series * 1
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp * 1
with tm.assertRaises(TypeError):
datetime_series * 1.0
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp * 1.0
# division
assert_series_equal(timedelta_series / 2,
Series([NaT, Timedelta('0.5s')]))
assert_series_equal(timedelta_series / 2.0,
Series([NaT, Timedelta('0.5s')]))
assert_series_equal(timedelta_series / nan, nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp / 1.0
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp / 1
def test_ops_datetimelike_align(self):
# GH 7500
# datetimelike ops need to align
dt = Series(date_range('2012-1-1', periods=3, freq='D'))
dt.iloc[2] = np.nan
dt2 = dt[::-1]
expected = Series([timedelta(0), timedelta(0), pd.NaT])
# name is reset
result = dt2 - dt
assert_series_equal(result, expected)
expected = Series(expected, name=0)
result = (dt2.to_frame() - dt.to_frame())[0]
assert_series_equal(result, expected)
def test_object_comparisons(self):
s = Series(['a', 'b', np.nan, 'c', 'a'])
result = s == 'a'
expected = Series([True, False, False, False, True])
assert_series_equal(result, expected)
result = s < 'a'
expected = Series([False, False, False, False, False])
assert_series_equal(result, expected)
result = s != 'a'
expected = -(s == 'a')
assert_series_equal(result, expected)
def test_comparison_tuples(self):
# GH11339
# comparisons vs tuple
s = Series([(1, 1), (1, 2)])
result = s == (1, 2)
expected = Series([False, True])
assert_series_equal(result, expected)
result = s != (1, 2)
expected = Series([True, False])
assert_series_equal(result, expected)
result = s == (0, 0)
expected = Series([False, False])
assert_series_equal(result, expected)
result = s != (0, 0)
expected = Series([True, True])
assert_series_equal(result, expected)
s = Series([(1, 1), (1, 1)])
result = s == (1, 1)
expected = Series([True, True])
assert_series_equal(result, expected)
result = s != (1, 1)
expected = Series([False, False])
assert_series_equal(result, expected)
s = Series([frozenset([1]), frozenset([1, 2])])
result = s == frozenset([1])
expected = Series([True, False])
assert_series_equal(result, expected)
def test_comparison_operators_with_nas(self):
s = Series(bdate_range('1/1/2000', periods=10), dtype=object)
s[::2] = np.nan
# test that comparisons work
ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne']
for op in ops:
val = s[5]
f = getattr(operator, op)
result = f(s, val)
expected = f(s.dropna(), val).reindex(s.index)
if op == 'ne':
expected = expected.fillna(True).astype(bool)
else:
expected = expected.fillna(False).astype(bool)
assert_series_equal(result, expected)
        # FIXME: reflected comparisons (scalar on the left) do not work yet; see the commented-out checks below.
# result = f(val, s)
# expected = f(val, s.dropna()).reindex(s.index)
# assert_series_equal(result, expected)
# boolean &, |, ^ should work with object arrays and propagate NAs
ops = ['and_', 'or_', 'xor']
mask = s.isnull()
for bool_op in ops:
f = getattr(operator, bool_op)
filled = s.fillna(s[0])
result = f(s < s[9], s > s[3])
expected = f(filled < filled[9], filled > filled[3])
expected[mask] = False
assert_series_equal(result, expected)
def test_comparison_object_numeric_nas(self):
s = Series(np.random.randn(10), dtype=object)
shifted = s.shift(2)
ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne']
for op in ops:
f = getattr(operator, op)
result = f(s, shifted)
expected = f(s.astype(float), shifted.astype(float))
assert_series_equal(result, expected)
def test_comparison_invalid(self):
# GH4968
# invalid date/int comparisons
s = Series(range(5))
s2 = Series(date_range('20010101', periods=5))
for (x, y) in [(s, s2), (s2, s)]:
self.assertRaises(TypeError, lambda: x == y)
self.assertRaises(TypeError, lambda: x != y)
self.assertRaises(TypeError, lambda: x >= y)
self.assertRaises(TypeError, lambda: x > y)
self.assertRaises(TypeError, lambda: x < y)
self.assertRaises(TypeError, lambda: x <= y)
def test_more_na_comparisons(self):
for dtype in [None, object]:
left = Series(['a', np.nan, 'c'], dtype=dtype)
right = Series(['a', np.nan, 'd'], dtype=dtype)
result = left == right
expected = Series([True, False, False])
assert_series_equal(result, expected)
result = left != right
expected = Series([False, True, True])
assert_series_equal(result, expected)
result = left == np.nan
expected = Series([False, False, False])
assert_series_equal(result, expected)
result = left != np.nan
expected = Series([True, True, True])
assert_series_equal(result, expected)
def test_nat_comparisons(self):
data = [([pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')],
[pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')]),
([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')],
[pd.NaT, pd.NaT, pd.Timedelta('3 days')]),
([pd.Period('2011-01', freq='M'), pd.NaT,
pd.Period('2011-03', freq='M')],
[pd.NaT, pd.NaT, pd.Period('2011-03', freq='M')])]
# add lhs / rhs switched data
data = data + [(r, l) for l, r in data]
for l, r in data:
for dtype in [None, object]:
left = Series(l, dtype=dtype)
# Series, Index
for right in [Series(r, dtype=dtype), Index(r, dtype=dtype)]:
expected = Series([False, False, True])
assert_series_equal(left == right, expected)
expected = Series([True, True, False])
assert_series_equal(left != right, expected)
expected = Series([False, False, False])
assert_series_equal(left < right, expected)
expected = Series([False, False, False])
assert_series_equal(left > right, expected)
expected = Series([False, False, True])
assert_series_equal(left >= right, expected)
expected = Series([False, False, True])
assert_series_equal(left <= right, expected)
def test_nat_comparisons_scalar(self):
data = [[pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')],
[pd.Timedelta('1 days'), pd.NaT, pd.Timedelta('3 days')],
[pd.Period('2011-01', freq='M'), pd.NaT,
pd.Period('2011-03', freq='M')]]
for l in data:
for dtype in [None, object]:
left = Series(l, dtype=dtype)
expected = Series([False, False, False])
assert_series_equal(left == pd.NaT, expected)
assert_series_equal(pd.NaT == left, expected)
expected = Series([True, True, True])
assert_series_equal(left != pd.NaT, expected)
assert_series_equal(pd.NaT != left, expected)
expected = Series([False, False, False])
assert_series_equal(left < pd.NaT, expected)
assert_series_equal(pd.NaT > left, expected)
assert_series_equal(left <= pd.NaT, expected)
assert_series_equal(pd.NaT >= left, expected)
assert_series_equal(left > pd.NaT, expected)
assert_series_equal(pd.NaT < left, expected)
assert_series_equal(left >= pd.NaT, expected)
assert_series_equal(pd.NaT <= left, expected)
def test_comparison_different_length(self):
a = Series(['a', 'b', 'c'])
b = Series(['b', 'a'])
self.assertRaises(ValueError, a.__lt__, b)
a = Series([1, 2])
b = Series([2, 3, 4])
self.assertRaises(ValueError, a.__eq__, b)
def test_comparison_label_based(self):
# GH 4947
# comparisons should be label based
a = Series([True, False, True], list('bca'))
b = Series([False, True, False], list('abc'))
expected = Series([False, True, False], list('abc'))
result = a & b
assert_series_equal(result, expected)
expected = Series([True, True, False], list('abc'))
result = a | b
assert_series_equal(result, expected)
expected = Series([True, False, False], list('abc'))
result = a ^ b
assert_series_equal(result, expected)
# rhs is bigger
a = Series([True, False, True], list('bca'))
b = Series([False, True, False, True], list('abcd'))
expected = Series([False, True, False, False], list('abcd'))
result = a & b
assert_series_equal(result, expected)
expected = Series([True, True, False, False], list('abcd'))
result = a | b
assert_series_equal(result, expected)
# filling
# vs empty
result = a & Series([])
expected = Series([False, False, False], list('bca'))
assert_series_equal(result, expected)
result = a | Series([])
expected = Series([True, False, True], list('bca'))
assert_series_equal(result, expected)
# vs non-matching
result = a & Series([1], ['z'])
expected = Series([False, False, False, False], list('abcz'))
assert_series_equal(result, expected)
result = a | Series([1], ['z'])
expected = Series([True, True, False, False], list('abcz'))
assert_series_equal(result, expected)
# identity
# we would like s[s|e] == s to hold for any e, whether empty or not
for e in [Series([]), Series([1], ['z']),
Series(np.nan, b.index), Series(np.nan, a.index)]:
result = a[a | e]
assert_series_equal(result, a[a])
for e in [Series(['z'])]:
if compat.PY3:
with tm.assert_produces_warning(RuntimeWarning):
result = a[a | e]
else:
result = a[a | e]
assert_series_equal(result, a[a])
# vs scalars
index = list('bca')
t = Series([True, False, True])
for v in [True, 1, 2]:
result = Series([True, False, True], index=index) | v
expected = Series([True, True, True], index=index)
assert_series_equal(result, expected)
for v in [np.nan, 'foo']:
self.assertRaises(TypeError, lambda: t | v)
for v in [False, 0]:
result = Series([True, False, True], index=index) | v
expected = Series([True, False, True], index=index)
assert_series_equal(result, expected)
for v in [True, 1]:
result = Series([True, False, True], index=index) & v
expected = Series([True, False, True], index=index)
assert_series_equal(result, expected)
for v in [False, 0]:
result = Series([True, False, True], index=index) & v
expected = Series([False, False, False], index=index)
assert_series_equal(result, expected)
for v in [np.nan]:
self.assertRaises(TypeError, lambda: t & v)
def test_comparison_flex_basic(self):
left = pd.Series(np.random.randn(10))
right = pd.Series(np.random.randn(10))
tm.assert_series_equal(left.eq(right), left == right)
tm.assert_series_equal(left.ne(right), left != right)
        tm.assert_series_equal(left.le(right), left <= right)
        tm.assert_series_equal(left.lt(right), left < right)
tm.assert_series_equal(left.gt(right), left > right)
tm.assert_series_equal(left.ge(right), left >= right)
# axis
for axis in [0, None, 'index']:
tm.assert_series_equal(left.eq(right, axis=axis), left == right)
tm.assert_series_equal(left.ne(right, axis=axis), left != right)
            tm.assert_series_equal(left.le(right, axis=axis), left <= right)
            tm.assert_series_equal(left.lt(right, axis=axis), left < right)
tm.assert_series_equal(left.gt(right, axis=axis), left > right)
tm.assert_series_equal(left.ge(right, axis=axis), left >= right)
#
msg = 'No axis named 1 for object type'
        for op in ['eq', 'ne', 'le', 'lt', 'gt', 'ge']:
with tm.assertRaisesRegexp(ValueError, msg):
getattr(left, op)(right, axis=1)
def test_comparison_flex_alignment(self):
left = Series([1, 3, 2], index=list('abc'))
right = Series([2, 2, 2], index=list('bcd'))
exp = pd.Series([False, False, True, False], index=list('abcd'))
tm.assert_series_equal(left.eq(right), exp)
exp = pd.Series([True, True, False, True], index=list('abcd'))
tm.assert_series_equal(left.ne(right), exp)
exp = pd.Series([False, False, True, False], index=list('abcd'))
tm.assert_series_equal(left.le(right), exp)
exp = pd.Series([False, False, False, False], index=list('abcd'))
tm.assert_series_equal(left.lt(right), exp)
exp = pd.Series([False, True, True, False], index=list('abcd'))
tm.assert_series_equal(left.ge(right), exp)
exp = pd.Series([False, True, False, False], index=list('abcd'))
tm.assert_series_equal(left.gt(right), exp)
def test_comparison_flex_alignment_fill(self):
left = Series([1, 3, 2], index=list('abc'))
right = Series([2, 2, 2], index=list('bcd'))
exp = pd.Series([False, False, True, True], index=list('abcd'))
tm.assert_series_equal(left.eq(right, fill_value=2), exp)
exp = pd.Series([True, True, False, False], index=list('abcd'))
tm.assert_series_equal(left.ne(right, fill_value=2), exp)
exp = pd.Series([False, False, True, True], index=list('abcd'))
tm.assert_series_equal(left.le(right, fill_value=0), exp)
exp = pd.Series([False, False, False, True], index=list('abcd'))
tm.assert_series_equal(left.lt(right, fill_value=0), exp)
exp = pd.Series([True, True, True, False], index=list('abcd'))
tm.assert_series_equal(left.ge(right, fill_value=0), exp)
exp = pd.Series([True, True, False, False], index=list('abcd'))
tm.assert_series_equal(left.gt(right, fill_value=0), exp)
def test_operators_bitwise(self):
# GH 9016: support bitwise op for integer types
index = list('bca')
s_tft = Series([True, False, True], index=index)
s_fff = Series([False, False, False], index=index)
s_tff = Series([True, False, False], index=index)
s_empty = Series([])
# TODO: unused
# s_0101 = Series([0, 1, 0, 1])
s_0123 = Series(range(4), dtype='int64')
s_3333 = Series([3] * 4)
s_4444 = Series([4] * 4)
res = s_tft & s_empty
expected = s_fff
assert_series_equal(res, expected)
res = s_tft | s_empty
expected = s_tft
assert_series_equal(res, expected)
res = s_0123 & s_3333
expected = Series(range(4), dtype='int64')
assert_series_equal(res, expected)
res = s_0123 | s_4444
expected = Series(range(4, 8), dtype='int64')
assert_series_equal(res, expected)
s_a0b1c0 = Series([1], list('b'))
res = s_tft & s_a0b1c0
expected = s_tff.reindex(list('abc'))
assert_series_equal(res, expected)
res = s_tft | s_a0b1c0
expected = s_tft.reindex(list('abc'))
assert_series_equal(res, expected)
n0 = 0
res = s_tft & n0
expected = s_fff
assert_series_equal(res, expected)
res = s_0123 & n0
expected = Series([0] * 4)
assert_series_equal(res, expected)
n1 = 1
res = s_tft & n1
expected = s_tft
assert_series_equal(res, expected)
res = s_0123 & n1
expected = Series([0, 1, 0, 1])
assert_series_equal(res, expected)
s_1111 = Series([1] * 4, dtype='int8')
res = s_0123 & s_1111
expected = Series([0, 1, 0, 1], dtype='int64')
assert_series_equal(res, expected)
res = s_0123.astype(np.int16) | s_1111.astype(np.int32)
expected = Series([1, 1, 3, 3], dtype='int32')
assert_series_equal(res, expected)
self.assertRaises(TypeError, lambda: s_1111 & 'a')
self.assertRaises(TypeError, lambda: s_1111 & ['a', 'b', 'c', 'd'])
self.assertRaises(TypeError, lambda: s_0123 & np.NaN)
self.assertRaises(TypeError, lambda: s_0123 & 3.14)
self.assertRaises(TypeError, lambda: s_0123 & [0.1, 4, 3.14, 2])
# s_0123 will be all false now because of reindexing like s_tft
if compat.PY3:
# unable to sort incompatible object via .union.
exp = Series([False] * 7, index=['b', 'c', 'a', 0, 1, 2, 3])
with tm.assert_produces_warning(RuntimeWarning):
assert_series_equal(s_tft & s_0123, exp)
else:
exp = Series([False] * 7, index=[0, 1, 2, 3, 'a', 'b', 'c'])
assert_series_equal(s_tft & s_0123, exp)
# s_tft will be all false now because of reindexing like s_0123
if compat.PY3:
# unable to sort incompatible object via .union.
exp = Series([False] * 7, index=[0, 1, 2, 3, 'b', 'c', 'a'])
with tm.assert_produces_warning(RuntimeWarning):
assert_series_equal(s_0123 & s_tft, exp)
else:
exp = Series([False] * 7, index=[0, 1, 2, 3, 'a', 'b', 'c'])
assert_series_equal(s_0123 & s_tft, exp)
assert_series_equal(s_0123 & False, Series([False] * 4))
assert_series_equal(s_0123 ^ False, Series([False, True, True, True]))
assert_series_equal(s_0123 & [False], Series([False] * 4))
assert_series_equal(s_0123 & (False), Series([False] * 4))
assert_series_equal(s_0123 & Series([False, np.NaN, False, False]),
Series([False] * 4))
s_ftft = Series([False, True, False, True])
assert_series_equal(s_0123 & Series([0.1, 4, -3.14, 2]), s_ftft)
s_abNd = Series(['a', 'b', np.NaN, 'd'])
res = s_0123 & s_abNd
expected = s_ftft
assert_series_equal(res, expected)
def test_scalar_na_cmp_corners(self):
s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])
def tester(a, b):
return a & b
self.assertRaises(TypeError, tester, s, datetime(2005, 1, 1))
s = Series([2, 3, 4, 5, 6, 7, 8, 9, datetime(2005, 1, 1)])
s[::2] = np.nan
expected = Series(True, index=s.index)
expected[::2] = False
assert_series_equal(tester(s, list(s)), expected)
d = DataFrame({'A': s})
# TODO: Fix this exception - needs to be fixed! (see GH5035)
# (previously this was a TypeError because series returned
# NotImplemented
# this is an alignment issue; these are equivalent
# https://github.com/pydata/pandas/issues/5284
self.assertRaises(ValueError, lambda: d.__and__(s, axis='columns'))
self.assertRaises(ValueError, tester, s, d)
# this is wrong, as it's not a boolean result
# result = d.__and__(s,axis='index')
def test_operators_corner(self):
series = self.ts
empty = Series([], index=Index([]))
result = series + empty
self.assertTrue(np.isnan(result).all())
result = empty + Series([], index=Index([]))
self.assertEqual(len(result), 0)
# TODO: this returned NotImplemented earlier, what to do?
# deltas = Series([timedelta(1)] * 5, index=np.arange(5))
# sub_deltas = deltas[::2]
# deltas5 = deltas * 5
# deltas = deltas + sub_deltas
# float + int
int_ts = self.ts.astype(int)[:-5]
added = self.ts + int_ts
expected = Series(self.ts.values[:-5] + int_ts.values,
index=self.ts.index[:-5], name='ts')
self.assert_series_equal(added[:-5], expected)
def test_operators_reverse_object(self):
# GH 56
arr = Series(np.random.randn(10), index=np.arange(10), dtype=object)
def _check_op(arr, op):
result = op(1., arr)
expected = op(1., arr.astype(float))
assert_series_equal(result.astype(float), expected)
_check_op(arr, operator.add)
_check_op(arr, operator.sub)
_check_op(arr, operator.mul)
_check_op(arr, operator.truediv)
_check_op(arr, operator.floordiv)
def test_arith_ops_df_compat(self):
# GH 1134
s1 = pd.Series([1, 2, 3], index=list('ABC'), name='x')
s2 = pd.Series([2, 2, 2], index=list('ABD'), name='x')
exp = pd.Series([3.0, 4.0, np.nan, np.nan],
index=list('ABCD'), name='x')
tm.assert_series_equal(s1 + s2, exp)
tm.assert_series_equal(s2 + s1, exp)
exp = pd.DataFrame({'x': [3.0, 4.0, np.nan, np.nan]},
index=list('ABCD'))
tm.assert_frame_equal(s1.to_frame() + s2.to_frame(), exp)
tm.assert_frame_equal(s2.to_frame() + s1.to_frame(), exp)
# different length
s3 = pd.Series([1, 2, 3], index=list('ABC'), name='x')
s4 = pd.Series([2, 2, 2, 2], index=list('ABCD'), name='x')
exp = pd.Series([3, 4, 5, np.nan],
index=list('ABCD'), name='x')
tm.assert_series_equal(s3 + s4, exp)
tm.assert_series_equal(s4 + s3, exp)
exp = pd.DataFrame({'x': [3, 4, 5, np.nan]},
index=list('ABCD'))
tm.assert_frame_equal(s3.to_frame() + s4.to_frame(), exp)
tm.assert_frame_equal(s4.to_frame() + s3.to_frame(), exp)
def test_comp_ops_df_compat(self):
# GH 1134
s1 = pd.Series([1, 2, 3], index=list('ABC'), name='x')
s2 = pd.Series([2, 2, 2], index=list('ABD'), name='x')
s3 = pd.Series([1, 2, 3], index=list('ABC'), name='x')
s4 = pd.Series([2, 2, 2, 2], index=list('ABCD'), name='x')
for l, r in [(s1, s2), (s2, s1), (s3, s4), (s4, s3)]:
msg = "Can only compare identically-labeled Series objects"
with tm.assertRaisesRegexp(ValueError, msg):
l == r
with tm.assertRaisesRegexp(ValueError, msg):
l != r
with tm.assertRaisesRegexp(ValueError, msg):
l < r
msg = "Can only compare identically-labeled DataFrame objects"
with tm.assertRaisesRegexp(ValueError, msg):
l.to_frame() == r.to_frame()
with tm.assertRaisesRegexp(ValueError, msg):
l.to_frame() != r.to_frame()
with tm.assertRaisesRegexp(ValueError, msg):
l.to_frame() < r.to_frame()
def test_bool_ops_df_compat(self):
# GH 1134
s1 = pd.Series([True, False, True], index=list('ABC'), name='x')
s2 = pd.Series([True, True, False], index=list('ABD'), name='x')
exp = pd.Series([True, False, False, False],
index=list('ABCD'), name='x')
tm.assert_series_equal(s1 & s2, exp)
tm.assert_series_equal(s2 & s1, exp)
# True | np.nan => True
exp = pd.Series([True, True, True, False],
index=list('ABCD'), name='x')
tm.assert_series_equal(s1 | s2, exp)
# np.nan | True => np.nan, filled with False
exp = pd.Series([True, True, False, False],
index=list('ABCD'), name='x')
tm.assert_series_equal(s2 | s1, exp)
# DataFrame doesn't fill nan with False
exp = pd.DataFrame({'x': [True, False, np.nan, np.nan]},
index=list('ABCD'))
tm.assert_frame_equal(s1.to_frame() & s2.to_frame(), exp)
tm.assert_frame_equal(s2.to_frame() & s1.to_frame(), exp)
exp = pd.DataFrame({'x': [True, True, np.nan, np.nan]},
index=list('ABCD'))
tm.assert_frame_equal(s1.to_frame() | s2.to_frame(), exp)
tm.assert_frame_equal(s2.to_frame() | s1.to_frame(), exp)
# different length
s3 = pd.Series([True, False, True], index=list('ABC'), name='x')
s4 = pd.Series([True, True, True, True], index=list('ABCD'), name='x')
exp = pd.Series([True, False, True, False],
index=list('ABCD'), name='x')
tm.assert_series_equal(s3 & s4, exp)
tm.assert_series_equal(s4 & s3, exp)
# np.nan | True => np.nan, filled with False
exp = pd.Series([True, True, True, False],
index=list('ABCD'), name='x')
| tm.assert_series_equal(s3 | s4, exp) | pandas.util.testing.assert_series_equal |
# Copyright (c) 2018-2021, NVIDIA CORPORATION.
import array as arr
import datetime
import io
import operator
import random
import re
import string
import textwrap
from copy import copy
import cupy
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from numba import cuda
import cudf
from cudf.core._compat import PANDAS_GE_110, PANDAS_GE_120
from cudf.core.column import column
from cudf.tests import utils
from cudf.tests.utils import (
ALL_TYPES,
DATETIME_TYPES,
NUMERIC_TYPES,
assert_eq,
assert_exceptions_equal,
does_not_raise,
gen_rand,
)
def test_init_via_list_of_tuples():
data = [
(5, "cats", "jump", np.nan),
(2, "dogs", "dig", 7.5),
(3, "cows", "moo", -2.1, "occasionally"),
]
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def _dataframe_na_data():
return [
pd.DataFrame(
{
"a": [0, 1, 2, np.nan, 4, None, 6],
"b": [np.nan, None, "u", "h", "d", "a", "m"],
},
index=["q", "w", "e", "r", "t", "y", "u"],
),
pd.DataFrame({"a": [0, 1, 2, 3, 4], "b": ["a", "b", "u", "h", "d"]}),
pd.DataFrame(
{
"a": [None, None, np.nan, None],
"b": [np.nan, None, np.nan, None],
}
),
pd.DataFrame({"a": []}),
pd.DataFrame({"a": [np.nan], "b": [None]}),
pd.DataFrame({"a": ["a", "b", "c", None, "e"]}),
pd.DataFrame({"a": ["a", "b", "c", "d", "e"]}),
]
@pytest.mark.parametrize("rows", [0, 1, 2, 100])
def test_init_via_list_of_empty_tuples(rows):
data = [()] * rows
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(
pdf,
gdf,
check_like=True,
check_column_type=False,
check_index_type=False,
)
@pytest.mark.parametrize(
"dict_of_series",
[
{"a": pd.Series([1.0, 2.0, 3.0])},
{"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 4.0], index=[1, 2, 3]),
},
{"a": [1, 2, 3], "b": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=["a", "b", "c"]),
"b": pd.Series([1.0, 2.0, 4.0], index=["c", "d", "e"]),
},
{
"a": pd.Series(
["a", "b", "c"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
"b": pd.Series(
["a", " b", "d"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
},
],
)
def test_init_from_series_align(dict_of_series):
pdf = pd.DataFrame(dict_of_series)
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
for key in dict_of_series:
if isinstance(dict_of_series[key], pd.Series):
dict_of_series[key] = cudf.Series(dict_of_series[key])
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
("dict_of_series", "expectation"),
[
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 5, 6]),
},
pytest.raises(
ValueError, match="Cannot align indices with non-unique values"
),
),
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
},
does_not_raise(),
),
],
)
def test_init_from_series_align_nonunique(dict_of_series, expectation):
with expectation:
gdf = cudf.DataFrame(dict_of_series)
if expectation == does_not_raise():
pdf = pd.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
def test_init_unaligned_with_index():
pdf = pd.DataFrame(
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
gdf = cudf.DataFrame(
{
"a": cudf.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": cudf.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
assert_eq(pdf, gdf, check_dtype=False)
def test_series_basic():
# Make series from buffer
a1 = np.arange(10, dtype=np.float64)
series = cudf.Series(a1)
assert len(series) == 10
np.testing.assert_equal(series.to_array(), np.hstack([a1]))
def test_series_from_cupy_scalars():
data = [0.1, 0.2, 0.3]
data_np = np.array(data)
data_cp = cupy.array(data)
s_np = cudf.Series([data_np[0], data_np[2]])
s_cp = cudf.Series([data_cp[0], data_cp[2]])
assert_eq(s_np, s_cp)
@pytest.mark.parametrize("a", [[1, 2, 3], [1, 10, 30]])
@pytest.mark.parametrize("b", [[4, 5, 6], [-11, -100, 30]])
def test_append_index(a, b):
df = pd.DataFrame()
df["a"] = a
df["b"] = b
gdf = cudf.DataFrame()
gdf["a"] = a
gdf["b"] = b
# Check the default index after appending two columns (Series)
expected = df.a.append(df.b)
actual = gdf.a.append(gdf.b)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
expected = df.a.append(df.b, ignore_index=True)
actual = gdf.a.append(gdf.b, ignore_index=True)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
def test_series_init_none():
# test for creating empty series
# 1: without initializing
sr1 = cudf.Series()
got = sr1.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
# 2: Using `None` as an initializer
sr2 = cudf.Series(None)
got = sr2.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_basic():
np.random.seed(0)
df = cudf.DataFrame()
# Populate with cuda memory
df["keys"] = np.arange(10, dtype=np.float64)
np.testing.assert_equal(df["keys"].to_array(), np.arange(10))
assert len(df) == 10
# Populate with numpy array
rnd_vals = np.random.random(10)
df["vals"] = rnd_vals
np.testing.assert_equal(df["vals"].to_array(), rnd_vals)
assert len(df) == 10
assert tuple(df.columns) == ("keys", "vals")
# Make another dataframe
df2 = cudf.DataFrame()
df2["keys"] = np.array([123], dtype=np.float64)
df2["vals"] = np.array([321], dtype=np.float64)
# Concat
df = cudf.concat([df, df2])
assert len(df) == 11
hkeys = np.asarray(np.arange(10, dtype=np.float64).tolist() + [123])
hvals = np.asarray(rnd_vals.tolist() + [321])
np.testing.assert_equal(df["keys"].to_array(), hkeys)
np.testing.assert_equal(df["vals"].to_array(), hvals)
# As matrix
mat = df.as_matrix()
expect = np.vstack([hkeys, hvals]).T
np.testing.assert_equal(mat, expect)
# test dataframe with tuple name
df_tup = cudf.DataFrame()
data = np.arange(10)
df_tup[(1, "foobar")] = data
np.testing.assert_equal(data, df_tup[(1, "foobar")].to_array())
df = cudf.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
pdf = pd.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
assert_eq(df, pdf)
gdf = cudf.DataFrame({"id": [0, 1], "val": [None, None]})
gdf["val"] = gdf["val"].astype("int")
assert gdf["val"].isnull().all()
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"columns", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_columns(pdf, columns, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(columns=columns, inplace=inplace)
actual = gdf.drop(columns=columns, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_0(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=0, inplace=inplace)
actual = gdf.drop(labels=labels, axis=0, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"index",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_index(pdf, index, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace)
actual = gdf.drop(index=index, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5},
index=pd.MultiIndex(
levels=[
["lama", "cow", "falcon"],
["speed", "weight", "length"],
],
codes=[
[0, 0, 0, 1, 1, 1, 2, 2, 2, 1],
[0, 1, 2, 0, 1, 2, 0, 1, 2, 1],
],
),
)
],
)
@pytest.mark.parametrize(
"index,level",
[
("cow", 0),
("lama", 0),
("falcon", 0),
("speed", 1),
("weight", 1),
("length", 1),
pytest.param(
"cow",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"lama",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"falcon",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_multiindex(pdf, index, level, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace, level=level)
actual = gdf.drop(index=index, inplace=inplace, level=level)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_1(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=1, inplace=inplace)
actual = gdf.drop(labels=labels, axis=1, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
def test_dataframe_drop_error():
df = cudf.DataFrame({"a": [1], "b": [2], "c": [3]})
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "d"}),
rfunc_args_and_kwargs=([], {"columns": "d"}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
rfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
rfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
expected_error_message="Cannot specify both",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"axis": 1}),
rfunc_args_and_kwargs=([], {"axis": 1}),
expected_error_message="Need to specify at least",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([[2, 0]],),
rfunc_args_and_kwargs=([[2, 0]],),
expected_error_message="One or more values not found in axis",
)
def test_dataframe_drop_raises():
df = cudf.DataFrame(
{"a": [1, 2, 3], "c": [10, 20, 30]}, index=["x", "y", "z"]
)
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["p"],),
rfunc_args_and_kwargs=(["p"],),
expected_error_message="One or more values not found in axis",
)
# label dtype mismatch
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([3],),
rfunc_args_and_kwargs=([3],),
expected_error_message="One or more values not found in axis",
)
expect = pdf.drop("p", errors="ignore")
actual = df.drop("p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "p"}),
rfunc_args_and_kwargs=([], {"columns": "p"}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(columns="p", errors="ignore")
actual = df.drop(columns="p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
rfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(labels="p", axis=1, errors="ignore")
actual = df.drop(labels="p", axis=1, errors="ignore")
assert_eq(actual, expect)
def test_dataframe_column_add_drop_via_setitem():
df = cudf.DataFrame()
data = np.asarray(range(10))
df["a"] = data
df["b"] = data
assert tuple(df.columns) == ("a", "b")
del df["a"]
assert tuple(df.columns) == ("b",)
df["c"] = data
assert tuple(df.columns) == ("b", "c")
df["a"] = data
assert tuple(df.columns) == ("b", "c", "a")
def test_dataframe_column_set_via_attr():
data_0 = np.asarray([0, 2, 4, 5])
data_1 = np.asarray([1, 4, 2, 3])
data_2 = np.asarray([2, 0, 3, 0])
df = cudf.DataFrame({"a": data_0, "b": data_1, "c": data_2})
for i in range(10):
df.c = df.a
assert assert_eq(df.c, df.a, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
df.c = df.b
assert assert_eq(df.c, df.b, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
def test_dataframe_column_drop_via_attr():
df = cudf.DataFrame({"a": []})
with pytest.raises(AttributeError):
del df.a
assert tuple(df.columns) == tuple("a")
@pytest.mark.parametrize("axis", [0, "index"])
def test_dataframe_index_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper={1: 5, 2: 6}, axis=axis)
got = gdf.rename(mapper={1: 5, 2: 6}, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(index={1: 5, 2: 6})
got = gdf.rename(index={1: 5, 2: 6})
assert_eq(expect, got)
expect = pdf.rename({1: 5, 2: 6})
got = gdf.rename({1: 5, 2: 6})
assert_eq(expect, got)
# `pandas` can support indexes with mixed values. We throw a
# `NotImplementedError`.
with pytest.raises(NotImplementedError):
gdf.rename(mapper={1: "x", 2: "y"}, axis=axis)
def test_dataframe_MI_rename():
gdf = cudf.DataFrame(
{"a": np.arange(10), "b": np.arange(10), "c": np.arange(10)}
)
gdg = gdf.groupby(["a", "b"]).count()
pdg = gdg.to_pandas()
expect = pdg.rename(mapper={1: 5, 2: 6}, axis=0)
got = gdg.rename(mapper={1: 5, 2: 6}, axis=0)
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [1, "columns"])
def test_dataframe_column_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper=lambda name: 2 * name, axis=axis)
got = gdf.rename(mapper=lambda name: 2 * name, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(columns=lambda name: 2 * name)
got = gdf.rename(columns=lambda name: 2 * name)
assert_eq(expect, got)
rename_mapper = {"a": "z", "b": "y", "c": "x"}
expect = pdf.rename(columns=rename_mapper)
got = gdf.rename(columns=rename_mapper)
assert_eq(expect, got)
def test_dataframe_pop():
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": ["x", "y", "z"], "c": [7.0, 8.0, 9.0]}
)
gdf = cudf.DataFrame.from_pandas(pdf)
# Test non-existing column error
with pytest.raises(KeyError) as raises:
gdf.pop("fake_colname")
raises.match("fake_colname")
# check pop numeric column
pdf_pop = pdf.pop("a")
gdf_pop = gdf.pop("a")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check string column
pdf_pop = pdf.pop("b")
gdf_pop = gdf.pop("b")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check float column and empty dataframe
pdf_pop = pdf.pop("c")
gdf_pop = gdf.pop("c")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check empty dataframe edge case
empty_pdf = pd.DataFrame(columns=["a", "b"])
empty_gdf = cudf.DataFrame(columns=["a", "b"])
pb = empty_pdf.pop("b")
gb = empty_gdf.pop("b")
assert len(pb) == len(gb)
assert empty_pdf.empty and empty_gdf.empty
@pytest.mark.parametrize("nelem", [0, 3, 100, 1000])
def test_dataframe_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df["a"].dtype is np.dtype(np.int32)
df["b"] = df["a"].astype(np.float32)
assert df["b"].dtype is np.dtype(np.float32)
np.testing.assert_equal(df["a"].to_array(), df["b"].to_array())
@pytest.mark.parametrize("nelem", [0, 100])
def test_index_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df.index.dtype is np.dtype(np.int64)
df.index = df.index.astype(np.float32)
assert df.index.dtype is np.dtype(np.float32)
df["a"] = df["a"].astype(np.float32)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
df["b"] = df["a"]
df = df.set_index("b")
df["a"] = df["a"].astype(np.int16)
df.index = df.index.astype(np.int16)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
def test_dataframe_to_string():
pd.options.display.max_rows = 5
pd.options.display.max_columns = 8
# Test basic
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
string = str(df)
assert string.splitlines()[-1] == "[6 rows x 2 columns]"
# Test skipped columns
df = cudf.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6],
"b": [11, 12, 13, 14, 15, 16],
"c": [11, 12, 13, 14, 15, 16],
"d": [11, 12, 13, 14, 15, 16],
}
)
string = df.to_string()
assert string.splitlines()[-1] == "[6 rows x 4 columns]"
# Test masked
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
data = np.arange(6)
mask = np.zeros(1, dtype=cudf.utils.utils.mask_dtype)
mask[0] = 0b00101101
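# 0b00101101 sets bits 0, 2, 3 and 5 (LSB first), so of the six data rows only
# those positions are valid; rows 1 and 4 are null, giving null_count == 2 and
# validids == [0, 2, 3, 5] below.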
masked = cudf.Series.from_masked_array(data, mask)
assert masked.null_count == 2
df["c"] = masked
# check data
values = masked.copy()
validids = [0, 2, 3, 5]
densearray = masked.to_array()
np.testing.assert_equal(data[validids], densearray)
# valid positions are correct
for i in validids:
assert data[i] == values[i]
# null position is correct
for i in range(len(values)):
if i not in validids:
assert values[i] is cudf.NA
pd.options.display.max_rows = 10
got = df.to_string()
expect = """
a b c
0 1 11 0
1 2 12 <NA>
2 3 13 2
3 4 14 3
4 5 15 <NA>
5 6 16 5
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_to_string_wide(monkeypatch):
monkeypatch.setenv("COLUMNS", "79")
# Test basic
df = cudf.DataFrame()
for i in range(100):
df["a{}".format(i)] = list(range(3))
pd.options.display.max_columns = 0
got = df.to_string()
expect = """
a0 a1 a2 a3 a4 a5 a6 a7 ... a92 a93 a94 a95 a96 a97 a98 a99
0 0 0 0 0 0 0 0 0 ... 0 0 0 0 0 0 0 0
1 1 1 1 1 1 1 1 1 ... 1 1 1 1 1 1 1 1
2 2 2 2 2 2 2 2 2 ... 2 2 2 2 2 2 2 2
[3 rows x 100 columns]
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_empty_to_string():
# Test for printing empty dataframe
df = cudf.DataFrame()
got = df.to_string()
expect = "Empty DataFrame\nColumns: []\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_emptycolumns_to_string():
# Test for printing a dataframe with empty columns
df = cudf.DataFrame()
df["a"] = []
df["b"] = []
got = df.to_string()
expect = "Empty DataFrame\nColumns: [a, b]\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy():
# Test for copying a dataframe using the python copy package
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = copy(df)
df2["b"] = [4, 5, 6]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy_shallow():
# Test for copying a dataframe using the DataFrame.copy() method
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = df.copy()
df2["b"] = [4, 2, 3]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_dtypes():
dtypes = pd.Series(
[np.int32, np.float32, np.float64], index=["c", "a", "b"]
)
df = cudf.DataFrame(
{k: np.ones(10, dtype=v) for k, v in dtypes.iteritems()}
)
assert df.dtypes.equals(dtypes)
def test_dataframe_add_col_to_object_dataframe():
# Test for adding column to an empty object dataframe
cols = ["a", "b", "c"]
df = pd.DataFrame(columns=cols, dtype="str")
data = {k: v for (k, v) in zip(cols, [["a"] for _ in cols])}
gdf = cudf.DataFrame(data)
gdf = gdf[:0]
assert gdf.dtypes.equals(df.dtypes)
gdf["a"] = [1]
df["a"] = [10]
assert gdf.dtypes.equals(df.dtypes)
gdf["b"] = [1.0]
df["b"] = [10.0]
assert gdf.dtypes.equals(df.dtypes)
def test_dataframe_dir_and_getattr():
df = cudf.DataFrame(
{
"a": np.ones(10),
"b": np.ones(10),
"not an id": np.ones(10),
"oop$": np.ones(10),
}
)
o = dir(df)
assert {"a", "b"}.issubset(o)
assert "not an id" not in o
assert "oop$" not in o
# Getattr works
assert df.a.equals(df["a"])
assert df.b.equals(df["b"])
with pytest.raises(AttributeError):
df.not_a_column
@pytest.mark.parametrize("order", ["C", "F"])
def test_empty_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
# Check fully empty dataframe.
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 0)
df = cudf.DataFrame()
nelem = 123
for k in "abc":
df[k] = np.random.random(nelem)
# Check all columns in empty dataframe.
mat = df.head(0).as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 3)
@pytest.mark.parametrize("order", ["C", "F"])
def test_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
nelem = 123
for k in "abcd":
df[k] = np.random.random(nelem)
# Check all columns
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (nelem, 4)
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
# Check column subset
mat = df.as_gpu_matrix(order=order, columns=["a", "c"]).copy_to_host()
assert mat.shape == (nelem, 2)
for i, k in enumerate("ac"):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
def test_dataframe_as_gpu_matrix_null_values():
df = cudf.DataFrame()
nelem = 123
na = -10000
refvalues = {}
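# Build per-column reference values: rows whose randomly generated mask bit is
# 0 are treated as null and recorded with the `na` sentinel, so they can be
# compared against the fillna(na) matrix further down.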
for k in "abcd":
df[k] = data = np.random.random(nelem)
bitmask = utils.random_bitmask(nelem)
df[k] = df[k].set_mask(bitmask)
boolmask = np.asarray(
utils.expand_bits_to_bytes(bitmask)[:nelem], dtype=np.bool_
)
data[~boolmask] = na
refvalues[k] = data
# Check null value causes error
with pytest.raises(ValueError) as raises:
df.as_gpu_matrix()
raises.match("column 'a' has null values")
for k in df.columns:
df[k] = df[k].fillna(na)
mat = df.as_gpu_matrix().copy_to_host()
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(refvalues[k], mat[:, i])
def test_dataframe_append_empty():
pdf = pd.DataFrame(
{
"key": [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4],
"value": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
gdf["newcol"] = 100
pdf["newcol"] = 100
assert len(gdf["newcol"]) == len(pdf)
assert len(pdf["newcol"]) == len(pdf)
assert_eq(gdf, pdf)
def test_dataframe_setitem_from_masked_object():
ary = np.random.randn(100)
mask = np.zeros(100, dtype=bool)
mask[:20] = True
np.random.shuffle(mask)
ary[mask] = np.nan
test1_null = cudf.Series(ary, nan_as_null=True)
assert test1_null.nullable
assert test1_null.null_count == 20
test1_nan = cudf.Series(ary, nan_as_null=False)
assert test1_nan.null_count == 0
test2_null = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=True
)
assert test2_null["a"].nullable
assert test2_null["a"].null_count == 20
test2_nan = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=False
)
assert test2_nan["a"].null_count == 0
gpu_ary = cupy.asarray(ary)
test3_null = cudf.Series(gpu_ary, nan_as_null=True)
assert test3_null.nullable
assert test3_null.null_count == 20
test3_nan = cudf.Series(gpu_ary, nan_as_null=False)
assert test3_nan.null_count == 0
test4 = cudf.DataFrame()
lst = [1, 2, None, 4, 5, 6, None, 8, 9]
test4["lst"] = lst
assert test4["lst"].nullable
assert test4["lst"].null_count == 2
def test_dataframe_append_to_empty():
pdf = pd.DataFrame()
pdf["a"] = []
pdf["b"] = [1, 2, 3]
gdf = cudf.DataFrame()
gdf["a"] = []
gdf["b"] = [1, 2, 3]
assert_eq(gdf, pdf)
def test_dataframe_setitem_index_len1():
gdf = cudf.DataFrame()
gdf["a"] = [1]
gdf["b"] = gdf.index._values
np.testing.assert_equal(gdf.b.to_array(), [0])
def test_empty_dataframe_setitem_df():
gdf1 = cudf.DataFrame()
gdf2 = cudf.DataFrame({"a": [1, 2, 3, 4, 5]})
gdf1["a"] = gdf2["a"]
assert_eq(gdf1, gdf2)
def test_assign():
gdf = cudf.DataFrame({"x": [1, 2, 3]})
gdf2 = gdf.assign(y=gdf.x + 1)
assert list(gdf.columns) == ["x"]
assert list(gdf2.columns) == ["x", "y"]
np.testing.assert_equal(gdf2.y.to_array(), [2, 3, 4])
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000])
def test_dataframe_hash_columns(nrows):
gdf = cudf.DataFrame()
data = np.asarray(range(nrows))
data[0] = data[-1] # make first and last the same
gdf["a"] = data
gdf["b"] = gdf.a + 100
out = gdf.hash_columns(["a", "b"])
assert isinstance(out, cupy.ndarray)
assert len(out) == nrows
assert out.dtype == np.int32
# Check default
out_all = gdf.hash_columns()
np.testing.assert_array_equal(cupy.asnumpy(out), cupy.asnumpy(out_all))
# Check single column
out_one = cupy.asnumpy(gdf.hash_columns(["a"]))
# First matches last
assert out_one[0] == out_one[-1]
# Equivalent to the cudf.Series.hash_values()
np.testing.assert_array_equal(cupy.asnumpy(gdf.a.hash_values()), out_one)
@pytest.mark.parametrize("nrows", [3, 10, 100, 1000])
@pytest.mark.parametrize("nparts", [1, 2, 8, 13])
@pytest.mark.parametrize("nkeys", [1, 2])
def test_dataframe_hash_partition(nrows, nparts, nkeys):
np.random.seed(123)
gdf = cudf.DataFrame()
keycols = []
for i in range(nkeys):
keyname = "key{}".format(i)
gdf[keyname] = np.random.randint(0, 7 - i, nrows)
keycols.append(keyname)
gdf["val1"] = np.random.randint(0, nrows * 2, nrows)
got = gdf.partition_by_hash(keycols, nparts=nparts)
# Must return a list
assert isinstance(got, list)
# Must have correct number of partitions
assert len(got) == nparts
# All partitions must be DataFrame type
assert all(isinstance(p, cudf.DataFrame) for p in got)
# Check that all partitions have unique keys
part_unique_keys = set()
for p in got:
if len(p):
# Take rows of the keycolumns and build a set of the key-values
unique_keys = set(map(tuple, p.as_matrix(columns=keycols)))
# Ensure that none of the key-values have occurred in other groups
assert not (unique_keys & part_unique_keys)
part_unique_keys |= unique_keys
assert len(part_unique_keys)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_value(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["val"] = gdf["val"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.key])
expected_value = row.key + 100 if valid else np.nan
got_value = row.val
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_keys(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["key"] = gdf["key"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3, keep_index=False)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.val - 100])
# val is key + 100
expected_value = row.val - 100 if valid else np.nan
got_value = row.key
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("keep_index", [True, False])
def test_dataframe_hash_partition_keep_index(keep_index):
gdf = cudf.DataFrame(
{"val": [1, 2, 3, 4], "key": [3, 2, 1, 4]}, index=[4, 3, 2, 1]
)
expected_df1 = cudf.DataFrame(
{"val": [1], "key": [3]}, index=[4] if keep_index else None
)
expected_df2 = cudf.DataFrame(
{"val": [2, 3, 4], "key": [2, 1, 4]},
index=[3, 2, 1] if keep_index else range(1, 4),
)
expected = [expected_df1, expected_df2]
parts = gdf.partition_by_hash(["key"], nparts=2, keep_index=keep_index)
for exp, got in zip(expected, parts):
assert_eq(exp, got)
def test_dataframe_hash_partition_empty():
gdf = cudf.DataFrame({"val": [1, 2], "key": [3, 2]}, index=["a", "b"])
parts = gdf.iloc[:0].partition_by_hash(["key"], nparts=3)
assert len(parts) == 3
for part in parts:
assert_eq(gdf.iloc[:0], part)
@pytest.mark.parametrize("dtype1", utils.supported_numpy_dtypes)
@pytest.mark.parametrize("dtype2", utils.supported_numpy_dtypes)
def test_dataframe_concat_different_numerical_columns(dtype1, dtype2):
df1 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype1)))
df2 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype2)))
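# Note: `and` binds tighter than `or`, so the condition below evaluates as
# (dtype1 != dtype2 and "datetime" in dtype1) or ("datetime" in dtype2).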
if dtype1 != dtype2 and "datetime" in dtype1 or "datetime" in dtype2:
with pytest.raises(TypeError):
cudf.concat([df1, df2])
else:
pres = pd.concat([df1, df2])
gres = cudf.concat([cudf.from_pandas(df1), cudf.from_pandas(df2)])
assert_eq(cudf.from_pandas(pres), gres)
def test_dataframe_concat_different_column_types():
df1 = cudf.Series([42], dtype=np.float64)
df2 = cudf.Series(["a"], dtype="category")
with pytest.raises(ValueError):
cudf.concat([df1, df2])
df2 = cudf.Series(["a string"])
with pytest.raises(TypeError):
cudf.concat([df1, df2])
@pytest.mark.parametrize(
"df_1", [cudf.DataFrame({"a": [1, 2], "b": [1, 3]}), cudf.DataFrame({})]
)
@pytest.mark.parametrize(
"df_2", [cudf.DataFrame({"a": [], "b": []}), cudf.DataFrame({})]
)
def test_concat_empty_dataframe(df_1, df_2):
got = cudf.concat([df_1, df_2])
expect = pd.concat([df_1.to_pandas(), df_2.to_pandas()], sort=False)
# ignoring dtypes as pandas upcasts int to float
# on concatenation with empty dataframes
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"df1_d",
[
{"a": [1, 2], "b": [1, 2], "c": ["s1", "s2"], "d": [1.0, 2.0]},
{"b": [1.9, 10.9], "c": ["s1", "s2"]},
{"c": ["s1"], "b": [None], "a": [False]},
],
)
@pytest.mark.parametrize(
"df2_d",
[
{"a": [1, 2, 3]},
{"a": [1, None, 3], "b": [True, True, False], "c": ["s3", None, "s4"]},
{"a": [], "b": []},
{},
],
)
def test_concat_different_column_dataframe(df1_d, df2_d):
got = cudf.concat(
[cudf.DataFrame(df1_d), cudf.DataFrame(df2_d), cudf.DataFrame(df1_d)],
sort=False,
)
expect = pd.concat(
[pd.DataFrame(df1_d), pd.DataFrame(df2_d), pd.DataFrame(df1_d)],
sort=False,
)
# numerical columns are upcast to float in cudf.DataFrame.to_pandas(), and
# NaN would otherwise be cast to 0 in non-float numerical columns, so
# normalize them to float64 with NaN before comparing
numeric_cols = got.dtypes[got.dtypes != "object"].index
for col in numeric_cols:
got[col] = got[col].astype(np.float64).fillna(np.nan)
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"ser_1", [pd.Series([1, 2, 3]), pd.Series([], dtype="float64")]
)
@pytest.mark.parametrize("ser_2", [pd.Series([], dtype="float64")])
def test_concat_empty_series(ser_1, ser_2):
got = cudf.concat([cudf.Series(ser_1), cudf.Series(ser_2)])
expect = pd.concat([ser_1, ser_2])
assert_eq(got, expect)
def test_concat_with_axis():
df1 = pd.DataFrame(dict(x=np.arange(5), y=np.arange(5)))
df2 = pd.DataFrame(dict(a=np.arange(5), b=np.arange(5)))
concat_df = pd.concat([df1, df2], axis=1)
cdf1 = cudf.from_pandas(df1)
cdf2 = cudf.from_pandas(df2)
# concat only dataframes
concat_cdf = cudf.concat([cdf1, cdf2], axis=1)
assert_eq(concat_cdf, concat_df)
# concat only series
concat_s = pd.concat([df1.x, df1.y], axis=1)
cs1 = cudf.Series.from_pandas(df1.x)
cs2 = cudf.Series.from_pandas(df1.y)
concat_cdf_s = cudf.concat([cs1, cs2], axis=1)
assert_eq(concat_cdf_s, concat_s)
# concat series and dataframes
s3 = pd.Series(np.random.random(5))
cs3 = cudf.Series.from_pandas(s3)
concat_cdf_all = cudf.concat([cdf1, cs3, cdf2], axis=1)
concat_df_all = pd.concat([df1, s3, df2], axis=1)
assert_eq(concat_cdf_all, concat_df_all)
# concat manual multi index
midf1 = cudf.from_pandas(df1)
midf1.index = cudf.MultiIndex(
levels=[[0, 1, 2, 3], [0, 1]], codes=[[0, 1, 2, 3, 2], [0, 1, 0, 1, 0]]
)
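# The codes arrays index into the levels row by row, so this MultiIndex spells
# out the tuples (0, 0), (1, 1), (2, 0), (3, 1), (2, 0).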
midf2 = midf1[2:]
midf2.index = cudf.MultiIndex(
levels=[[3, 4, 5], [2, 0]], codes=[[0, 1, 2], [1, 0, 1]]
)
mipdf1 = midf1.to_pandas()
mipdf2 = midf2.to_pandas()
assert_eq(cudf.concat([midf1, midf2]), pd.concat([mipdf1, mipdf2]))
assert_eq(cudf.concat([midf2, midf1]), pd.concat([mipdf2, mipdf1]))
assert_eq(
cudf.concat([midf1, midf2, midf1]), pd.concat([mipdf1, mipdf2, mipdf1])
)
# concat groupby multi index
gdf1 = cudf.DataFrame(
{
"x": np.random.randint(0, 10, 10),
"y": np.random.randint(0, 10, 10),
"z": np.random.randint(0, 10, 10),
"v": np.random.randint(0, 10, 10),
}
)
gdf2 = gdf1[5:]
gdg1 = gdf1.groupby(["x", "y"]).min()
gdg2 = gdf2.groupby(["x", "y"]).min()
pdg1 = gdg1.to_pandas()
pdg2 = gdg2.to_pandas()
assert_eq(cudf.concat([gdg1, gdg2]), pd.concat([pdg1, pdg2]))
assert_eq(cudf.concat([gdg2, gdg1]), pd.concat([pdg2, pdg1]))
# series multi index concat
gdgz1 = gdg1.z
gdgz2 = gdg2.z
pdgz1 = gdgz1.to_pandas()
pdgz2 = gdgz2.to_pandas()
assert_eq(cudf.concat([gdgz1, gdgz2]), pd.concat([pdgz1, pdgz2]))
assert_eq(cudf.concat([gdgz2, gdgz1]), pd.concat([pdgz2, pdgz1]))
@pytest.mark.parametrize("nrows", [0, 3, 10, 100, 1000])
def test_nonmatching_index_setitem(nrows):
np.random.seed(0)
gdf = cudf.DataFrame()
gdf["a"] = np.random.randint(2147483647, size=nrows)
gdf["b"] = np.random.randint(2147483647, size=nrows)
gdf = gdf.set_index("b")
test_values = np.random.randint(2147483647, size=nrows)
gdf["c"] = test_values
assert len(test_values) == len(gdf["c"])
assert (
gdf["c"]
.to_pandas()
.equals(cudf.Series(test_values).set_index(gdf._index).to_pandas())
)
def test_from_pandas():
df = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
gdf = cudf.DataFrame.from_pandas(df)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = df.x
gs = cudf.Series.from_pandas(s)
assert isinstance(gs, cudf.Series)
assert_eq(s, gs)
@pytest.mark.parametrize("dtypes", [int, float])
def test_from_records(dtypes):
h_ary = np.ndarray(shape=(10, 4), dtype=dtypes)
rec_ary = h_ary.view(np.recarray)
gdf = cudf.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
df = pd.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame.from_records(rec_ary)
df = pd.DataFrame.from_records(rec_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
@pytest.mark.parametrize("columns", [None, ["first", "second", "third"]])
@pytest.mark.parametrize(
"index",
[
None,
["first", "second"],
"name",
"age",
"weight",
[10, 11],
["abc", "xyz"],
],
)
def test_from_records_index(columns, index):
rec_ary = np.array(
[("Rex", 9, 81.0), ("Fido", 3, 27.0)],
dtype=[("name", "U10"), ("age", "i4"), ("weight", "f4")],
)
gdf = cudf.DataFrame.from_records(rec_ary, columns=columns, index=index)
df = pd.DataFrame.from_records(rec_ary, columns=columns, index=index)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_construction_from_cupy_arrays():
h_ary = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
d_ary = cupy.asarray(h_ary)
gdf = cudf.DataFrame(d_ary, columns=["a", "b", "c"])
df = pd.DataFrame(h_ary, columns=["a", "b", "c"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
df = pd.DataFrame(h_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary, index=["a", "b"])
df = pd.DataFrame(h_ary, index=["a", "b"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=0, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=0, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=1, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=1, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_cupy_wrong_dimensions():
d_ary = cupy.empty((2, 3, 4), dtype=np.int32)
with pytest.raises(
ValueError, match="records dimension expected 1 or 2 but found: 3"
):
cudf.DataFrame(d_ary)
def test_dataframe_cupy_array_wrong_index():
d_ary = cupy.empty((2, 3), dtype=np.int32)
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index=["a"])
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index="a")
def test_index_in_dataframe_constructor():
a = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
b = cudf.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
assert_eq(a, b)
assert_eq(a.loc[4:], b.loc[4:])
dtypes = NUMERIC_TYPES + DATETIME_TYPES + ["bool"]
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
padf = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
gdf = cudf.DataFrame.from_arrow(padf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = pa.Array.from_pandas(df.a)
gs = cudf.Series.from_arrow(s)
assert isinstance(gs, cudf.Series)
# For some reason PyArrow to_pandas() converts to numpy array and has
# better type compatibility
np.testing.assert_array_equal(s.to_pandas(), gs.to_array())
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_to_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
pa_i = pa.Array.from_pandas(df.index)
pa_gi = gdf.index.to_arrow()
assert isinstance(pa_gi, pa.Array)
assert pa.Array.equals(pa_i, pa_gi)
@pytest.mark.parametrize("data_type", dtypes)
def test_to_from_arrow_nulls(data_type):
if data_type == "longlong":
data_type = "int64"
if data_type == "bool":
s1 = pa.array([True, None, False, None, True], type=data_type)
else:
dtype = np.dtype(data_type)
if dtype.type == np.datetime64:
time_unit, _ = np.datetime_data(dtype)
data_type = pa.timestamp(unit=time_unit)
s1 = pa.array([1, None, 3, None, 5], type=data_type)
gs1 = cudf.Series.from_arrow(s1)
assert isinstance(gs1, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s1.buffers()[0]).view("u1")[0],
gs1._column.mask_array_view.copy_to_host().view("u1")[0],
)
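# One mask byte carries 8 validity bits and these arrays hold only 5 elements,
# so comparing the first byte covers every row despite the padding difference.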
assert pa.Array.equals(s1, gs1.to_arrow())
s2 = pa.array([None, None, None, None, None], type=data_type)
gs2 = cudf.Series.from_arrow(s2)
assert isinstance(gs2, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s2.buffers()[0]).view("u1")[0],
gs2._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s2, gs2.to_arrow())
def test_to_arrow_categorical():
df = pd.DataFrame()
df["a"] = pd.Series(["a", "b", "c"], dtype="category")
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
def test_from_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert_eq(
pd.Series(pa_cat.to_pandas()), # PyArrow returns a pd.Categorical
gd_cat.to_pandas(),
)
def test_to_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert pa.Array.equals(pa_cat, gd_cat.to_arrow())
@pytest.mark.parametrize("data_type", dtypes)
def test_from_scalar_typing(data_type):
if data_type == "datetime64[ms]":
scalar = (
np.dtype("int64")
.type(np.random.randint(0, 5))
.astype("datetime64[ms]")
)
elif data_type.startswith("datetime64"):
scalar = np.datetime64(datetime.date.today()).astype("datetime64[ms]")
data_type = "datetime64[ms]"
else:
scalar = np.dtype(data_type).type(np.random.randint(0, 5))
gdf = cudf.DataFrame()
gdf["a"] = [1, 2, 3, 4, 5]
gdf["b"] = scalar
assert gdf["b"].dtype == np.dtype(data_type)
assert len(gdf["b"]) == len(gdf["a"])
@pytest.mark.parametrize("data_type", NUMERIC_TYPES)
def test_from_python_array(data_type):
np_arr = np.random.randint(0, 100, 10).astype(data_type)
data = memoryview(np_arr)
data = arr.array(data.format, data)
gs = cudf.Series(data)
np.testing.assert_equal(gs.to_array(), np_arr)
def test_series_shape():
ps = pd.Series([1, 2, 3, 4])
cs = cudf.Series([1, 2, 3, 4])
assert ps.shape == cs.shape
def test_series_shape_empty():
ps = pd.Series(dtype="float64")
cs = cudf.Series([])
assert ps.shape == cs.shape
def test_dataframe_shape():
pdf = pd.DataFrame({"a": [0, 1, 2, 3], "b": [0.1, 0.2, None, 0.3]})
gdf = cudf.DataFrame.from_pandas(pdf)
assert pdf.shape == gdf.shape
def test_dataframe_shape_empty():
pdf = pd.DataFrame()
gdf = cudf.DataFrame()
assert pdf.shape == gdf.shape
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
@pytest.mark.parametrize("dtype", dtypes)
@pytest.mark.parametrize("nulls", ["none", "some", "all"])
def test_dataframe_transpose(nulls, num_cols, num_rows, dtype):
pdf = pd.DataFrame()
null_rep = np.nan if dtype in ["float32", "float64"] else None
for i in range(num_cols):
colname = string.ascii_lowercase[i]
data = pd.Series(np.random.randint(0, 26, num_rows).astype(dtype))
if nulls == "some":
idx = np.random.choice(
num_rows, size=int(num_rows / 2), replace=False
)
data[idx] = null_rep
elif nulls == "all":
data[:] = null_rep
pdf[colname] = data
gdf = cudf.DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function)
assert_eq(expect, got_property)
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
def test_dataframe_transpose_category(num_cols, num_rows):
pdf = pd.DataFrame()
for i in range(num_cols):
colname = string.ascii_lowercase[i]
data = pd.Series(list(string.ascii_lowercase), dtype="category")
data = data.sample(num_rows, replace=True).reset_index(drop=True)
pdf[colname] = data
gdf = cudf.DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function.to_pandas())
assert_eq(expect, got_property.to_pandas())
def test_generated_column():
gdf = cudf.DataFrame({"a": (i for i in range(5))})
assert len(gdf) == 5
@pytest.fixture
def pdf():
return pd.DataFrame({"x": range(10), "y": range(10)})
@pytest.fixture
def gdf(pdf):
return cudf.DataFrame.from_pandas(pdf)
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize(
"func",
[
lambda df, **kwargs: df.min(**kwargs),
lambda df, **kwargs: df.max(**kwargs),
lambda df, **kwargs: df.sum(**kwargs),
lambda df, **kwargs: df.product(**kwargs),
lambda df, **kwargs: df.cummin(**kwargs),
lambda df, **kwargs: df.cummax(**kwargs),
lambda df, **kwargs: df.cumsum(**kwargs),
lambda df, **kwargs: df.cumprod(**kwargs),
lambda df, **kwargs: df.mean(**kwargs),
lambda df, **kwargs: df.sum(**kwargs),
lambda df, **kwargs: df.max(**kwargs),
lambda df, **kwargs: df.std(ddof=1, **kwargs),
lambda df, **kwargs: df.var(ddof=1, **kwargs),
lambda df, **kwargs: df.std(ddof=2, **kwargs),
lambda df, **kwargs: df.var(ddof=2, **kwargs),
lambda df, **kwargs: df.kurt(**kwargs),
lambda df, **kwargs: df.skew(**kwargs),
lambda df, **kwargs: df.all(**kwargs),
lambda df, **kwargs: df.any(**kwargs),
],
)
@pytest.mark.parametrize("skipna", [True, False, None])
def test_dataframe_reductions(data, func, skipna):
pdf = pd.DataFrame(data=data)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(func(pdf, skipna=skipna), func(gdf, skipna=skipna))
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize("func", [lambda df: df.count()])
def test_dataframe_count_reduction(data, func):
pdf = pd.DataFrame(data=data)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(func(pdf), func(gdf))
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize("ops", ["sum", "product", "prod"])
@pytest.mark.parametrize("skipna", [True, False, None])
@pytest.mark.parametrize("min_count", [-10, -1, 0, 1, 2, 3, 10])
def test_dataframe_min_count_ops(data, ops, skipna, min_count):
psr = pd.DataFrame(data)
gsr = cudf.DataFrame(data)
if PANDAS_GE_120 and psr.shape[0] * psr.shape[1] < min_count:
pytest.xfail("https://github.com/pandas-dev/pandas/issues/39738")
assert_eq(
getattr(psr, ops)(skipna=skipna, min_count=min_count),
getattr(gsr, ops)(skipna=skipna, min_count=min_count),
check_dtype=False,
)
@pytest.mark.parametrize(
"binop",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_binops_df(pdf, gdf, binop):
pdf = pdf + 1.0
gdf = gdf + 1.0
d = binop(pdf, pdf)
g = binop(gdf, gdf)
assert_eq(d, g)
@pytest.mark.parametrize("binop", [operator.and_, operator.or_, operator.xor])
def test_bitwise_binops_df(pdf, gdf, binop):
d = binop(pdf, pdf + 1)
g = binop(gdf, gdf + 1)
assert_eq(d, g)
@pytest.mark.parametrize(
"binop",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_binops_series(pdf, gdf, binop):
pdf = pdf + 1.0
gdf = gdf + 1.0
d = binop(pdf.x, pdf.y)
g = binop(gdf.x, gdf.y)
assert_eq(d, g)
@pytest.mark.parametrize("binop", [operator.and_, operator.or_, operator.xor])
def test_bitwise_binops_series(pdf, gdf, binop):
d = binop(pdf.x, pdf.y + 1)
g = binop(gdf.x, gdf.y + 1)
assert_eq(d, g)
@pytest.mark.parametrize("unaryop", [operator.neg, operator.inv, operator.abs])
def test_unaryops_df(pdf, gdf, unaryop):
d = unaryop(pdf - 5)
g = unaryop(gdf - 5)
assert_eq(d, g)
@pytest.mark.parametrize(
"func",
[
lambda df: df.empty,
lambda df: df.x.empty,
lambda df: df.x.fillna(123, limit=None, method=None, axis=None),
lambda df: df.drop("x", axis=1, errors="raise"),
],
)
def test_unary_operators(func, pdf, gdf):
p = func(pdf)
g = func(gdf)
assert_eq(p, g)
def test_is_monotonic(gdf):
pdf = pd.DataFrame({"x": [1, 2, 3]}, index=[3, 1, 2])
gdf = cudf.DataFrame.from_pandas(pdf)
assert not gdf.index.is_monotonic
assert not gdf.index.is_monotonic_increasing
assert not gdf.index.is_monotonic_decreasing
def test_iter(pdf, gdf):
assert list(pdf) == list(gdf)
def test_iteritems(gdf):
for k, v in gdf.iteritems():
assert k in gdf.columns
assert isinstance(v, cudf.Series)
assert_eq(v, gdf[k])
@pytest.mark.parametrize("q", [0.5, 1, 0.001, [0.5], [], [0.005, 0.5, 1]])
@pytest.mark.parametrize("numeric_only", [True, False])
def test_quantile(q, numeric_only):
ts = pd.date_range("2018-08-24", periods=5, freq="D")
td = pd.to_timedelta(np.arange(5), unit="h")
pdf = pd.DataFrame(
{"date": ts, "delta": td, "val": np.random.randn(len(ts))}
)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(pdf["date"].quantile(q), gdf["date"].quantile(q))
assert_eq(pdf["delta"].quantile(q), gdf["delta"].quantile(q))
assert_eq(pdf["val"].quantile(q), gdf["val"].quantile(q))
if numeric_only:
assert_eq(pdf.quantile(q), gdf.quantile(q))
else:
q = q if isinstance(q, list) else [q]
assert_eq(
pdf.quantile(
q if isinstance(q, list) else [q], numeric_only=False
),
gdf.quantile(q, numeric_only=False),
)
def test_empty_quantile():
pdf = pd.DataFrame({"x": []})
df = cudf.DataFrame({"x": []})
actual = df.quantile()
expected = pdf.quantile()
assert_eq(actual, expected)
def test_from_pandas_function(pdf):
gdf = cudf.from_pandas(pdf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(pdf, gdf)
gdf = cudf.from_pandas(pdf.x)
assert isinstance(gdf, cudf.Series)
assert_eq(pdf.x, gdf)
with pytest.raises(TypeError):
cudf.from_pandas(123)
@pytest.mark.parametrize("preserve_index", [True, False])
def test_arrow_pandas_compat(pdf, gdf, preserve_index):
pdf["z"] = range(10)
pdf = pdf.set_index("z")
gdf["z"] = range(10)
gdf = gdf.set_index("z")
pdf_arrow_table = pa.Table.from_pandas(pdf, preserve_index=preserve_index)
gdf_arrow_table = gdf.to_arrow(preserve_index=preserve_index)
assert pa.Table.equals(pdf_arrow_table, gdf_arrow_table)
gdf2 = cudf.DataFrame.from_arrow(pdf_arrow_table)
pdf2 = pdf_arrow_table.to_pandas()
assert_eq(pdf2, gdf2)
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000, 100000])
def test_series_hash_encode(nrows):
data = np.asarray(range(nrows))
# Python's hash of a string name varies between runs, which sometimes makes
# enc_with_name_arr and enc_arr come out the same, and there is no better
# way to force a constant hash for a string. So use an integer name to get
# a constant value back from hash.
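# (hash() of a small int is the int itself and is stable across runs, whereas
# string hashing is salted per process via PYTHONHASHSEED.)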
s = cudf.Series(data, name=1)
num_features = 1000
encoded_series = s.hash_encode(num_features)
assert isinstance(encoded_series, cudf.Series)
enc_arr = encoded_series.to_array()
assert np.all(enc_arr >= 0)
assert np.max(enc_arr) < num_features
enc_with_name_arr = s.hash_encode(num_features, use_name=True).to_array()
assert enc_with_name_arr[0] != enc_arr[0]
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"])
def test_cuda_array_interface(dtype):
np_data = np.arange(10).astype(dtype)
cupy_data = cupy.array(np_data)
pd_data = pd.Series(np_data)
cudf_data = cudf.Series(cupy_data)
assert_eq(pd_data, cudf_data)
gdf = cudf.DataFrame()
gdf["test"] = cupy_data
pd_data.name = "test"
assert_eq(pd_data, gdf["test"])
@pytest.mark.parametrize("nelem", [0, 2, 3, 100])
@pytest.mark.parametrize("nchunks", [1, 2, 5, 10])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow_chunked_arrays(nelem, nchunks, data_type):
np_list_data = [
np.random.randint(0, 100, nelem).astype(data_type)
for i in range(nchunks)
]
pa_chunk_array = pa.chunked_array(np_list_data)
expect = pd.Series(pa_chunk_array.to_pandas())
got = cudf.Series(pa_chunk_array)
assert_eq(expect, got)
np_list_data2 = [
np.random.randint(0, 100, nelem).astype(data_type)
for i in range(nchunks)
]
pa_chunk_array2 = pa.chunked_array(np_list_data2)
pa_table = pa.Table.from_arrays(
[pa_chunk_array, pa_chunk_array2], names=["a", "b"]
)
expect = pa_table.to_pandas()
got = cudf.DataFrame.from_arrow(pa_table)
assert_eq(expect, got)
@pytest.mark.skip(reason="Test was designed to be run in isolation")
def test_gpu_memory_usage_with_boolmask():
ctx = cuda.current_context()
def query_GPU_memory(note=""):
memInfo = ctx.get_memory_info()
usedMemoryGB = (memInfo.total - memInfo.free) / 1e9
return usedMemoryGB
cuda.current_context().deallocations.clear()
nRows = int(1e8)
nCols = 2
dataNumpy = np.asfortranarray(np.random.rand(nRows, nCols))
colNames = ["col" + str(iCol) for iCol in range(nCols)]
pandasDF = pd.DataFrame(data=dataNumpy, columns=colNames, dtype=np.float32)
cudaDF = cudf.core.DataFrame.from_pandas(pandasDF)
boolmask = cudf.Series(np.random.randint(1, 2, len(cudaDF)).astype("bool"))
memory_used = query_GPU_memory()
cudaDF = cudaDF[boolmask]
assert (
cudaDF.index._values.data_array_view.device_ctypes_pointer
== cudaDF["col0"].index._values.data_array_view.device_ctypes_pointer
)
assert (
cudaDF.index._values.data_array_view.device_ctypes_pointer
== cudaDF["col1"].index._values.data_array_view.device_ctypes_pointer
)
assert memory_used == query_GPU_memory()
def test_boolmask(pdf, gdf):
boolmask = np.random.randint(0, 2, len(pdf)) > 0
gdf = gdf[boolmask]
pdf = pdf[boolmask]
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"mask_shape",
[
(2, "ab"),
(2, "abc"),
(3, "ab"),
(3, "abc"),
(3, "abcd"),
(4, "abc"),
(4, "abcd"),
],
)
def test_dataframe_boolmask(mask_shape):
pdf = pd.DataFrame()
for col in "abc":
pdf[col] = np.random.randint(0, 10, 3)
pdf_mask = pd.DataFrame()
for col in mask_shape[1]:
pdf_mask[col] = np.random.randint(0, 2, mask_shape[0]) > 0
gdf = cudf.DataFrame.from_pandas(pdf)
gdf_mask = cudf.DataFrame.from_pandas(pdf_mask)
gdf = gdf[gdf_mask]
pdf = pdf[pdf_mask]
assert np.array_equal(gdf.columns, pdf.columns)
for col in gdf.columns:
assert np.array_equal(
gdf[col].fillna(-1).to_pandas().values, pdf[col].fillna(-1).values
)
@pytest.mark.parametrize(
"mask",
[
[True, False, True],
pytest.param(
cudf.Series([True, False, True]),
marks=pytest.mark.xfail(
reason="Pandas can't index a multiindex with a Series"
),
),
],
)
def test_dataframe_multiindex_boolmask(mask):
gdf = cudf.DataFrame(
{"w": [3, 2, 1], "x": [1, 2, 3], "y": [0, 1, 0], "z": [1, 1, 1]}
)
gdg = gdf.groupby(["w", "x"]).count()
pdg = gdg.to_pandas()
assert_eq(gdg[mask], pdg[mask])
def test_dataframe_assignment():
pdf = pd.DataFrame()
for col in "abc":
pdf[col] = np.array([0, 1, 1, -2, 10])
gdf = cudf.DataFrame.from_pandas(pdf)
gdf[gdf < 0] = 999
pdf[pdf < 0] = 999
assert_eq(gdf, pdf)
def test_1row_arrow_table():
data = [pa.array([0]), pa.array([1])]
batch = pa.RecordBatch.from_arrays(data, ["f0", "f1"])
table = pa.Table.from_batches([batch])
expect = table.to_pandas()
got = cudf.DataFrame.from_arrow(table)
assert_eq(expect, got)
def test_arrow_handle_no_index_name(pdf, gdf):
gdf_arrow = gdf.to_arrow()
pdf_arrow = pa.Table.from_pandas(pdf)
assert pa.Table.equals(pdf_arrow, gdf_arrow)
got = cudf.DataFrame.from_arrow(gdf_arrow)
expect = pdf_arrow.to_pandas()
assert_eq(expect, got)
@pytest.mark.parametrize("num_rows", [1, 3, 10, 100])
@pytest.mark.parametrize("num_bins", [1, 2, 4, 20])
@pytest.mark.parametrize("right", [True, False])
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"])
@pytest.mark.parametrize("series_bins", [True, False])
def test_series_digitize(num_rows, num_bins, right, dtype, series_bins):
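    # digitize should agree with np.digitize whether bins are passed as a
    # host array or as a cudf.Series.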
data = np.random.randint(0, 100, num_rows).astype(dtype)
bins = np.unique(np.sort(np.random.randint(2, 95, num_bins).astype(dtype)))
s = cudf.Series(data)
if series_bins:
s_bins = cudf.Series(bins)
indices = s.digitize(s_bins, right)
else:
indices = s.digitize(bins, right)
np.testing.assert_array_equal(
np.digitize(data, bins, right), indices.to_array()
)
def test_series_digitize_invalid_bins():
s = cudf.Series(np.random.randint(0, 30, 80), dtype="int32")
bins = cudf.Series([2, None, None, 50, 90], dtype="int32")
with pytest.raises(
ValueError, match="`bins` cannot contain null entries."
):
_ = s.digitize(bins)
def test_pandas_non_contiguous():
arr1 = np.random.sample([5000, 10])
assert arr1.flags["C_CONTIGUOUS"] is True
df = pd.DataFrame(arr1)
for col in df.columns:
assert df[col].values.flags["C_CONTIGUOUS"] is False
gdf = cudf.DataFrame.from_pandas(df)
assert_eq(gdf.to_pandas(), df)
@pytest.mark.parametrize("num_elements", [0, 2, 10, 100])
@pytest.mark.parametrize("null_type", [np.nan, None, "mixed"])
def test_series_all_null(num_elements, null_type):
if null_type == "mixed":
data = []
data1 = [np.nan] * int(num_elements / 2)
data2 = [None] * int(num_elements / 2)
for idx in range(len(data1)):
data.append(data1[idx])
data.append(data2[idx])
else:
data = [null_type] * num_elements
    # Cast the pandas side to float64: a list of None values would otherwise
    # produce an object-dtype Series.
expect = pd.Series(data, dtype="float64")
got = cudf.Series(data)
assert_eq(expect, got)
@pytest.mark.parametrize("num_elements", [0, 2, 10, 100])
def test_series_all_valid_nan(num_elements):
data = [np.nan] * num_elements
sr = cudf.Series(data, nan_as_null=False)
np.testing.assert_equal(sr.null_count, 0)
def test_series_rename():
pds = pd.Series([1, 2, 3], name="asdf")
gds = cudf.Series([1, 2, 3], name="asdf")
expect = pds.rename("new_name")
got = gds.rename("new_name")
assert_eq(expect, got)
pds = pd.Series(expect)
gds = cudf.Series(got)
assert_eq(pds, gds)
pds = pd.Series(expect, name="name name")
gds = cudf.Series(got, name="name name")
assert_eq(pds, gds)
@pytest.mark.parametrize("data_type", dtypes)
@pytest.mark.parametrize("nelem", [0, 100])
def test_head_tail(nelem, data_type):
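    # head()/tail() should be equivalent to the corresponding slices for both
    # DataFrame and Series, including negative and zero counts.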
def check_index_equality(left, right):
assert left.index.equals(right.index)
def check_values_equality(left, right):
if len(left) == 0 and len(right) == 0:
return None
np.testing.assert_array_equal(left.to_pandas(), right.to_pandas())
def check_frame_series_equality(left, right):
check_index_equality(left, right)
check_values_equality(left, right)
gdf = cudf.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
check_frame_series_equality(gdf.head(), gdf[:5])
check_frame_series_equality(gdf.head(3), gdf[:3])
check_frame_series_equality(gdf.head(-2), gdf[:-2])
check_frame_series_equality(gdf.head(0), gdf[0:0])
check_frame_series_equality(gdf["a"].head(), gdf["a"][:5])
check_frame_series_equality(gdf["a"].head(3), gdf["a"][:3])
check_frame_series_equality(gdf["a"].head(-2), gdf["a"][:-2])
check_frame_series_equality(gdf.tail(), gdf[-5:])
check_frame_series_equality(gdf.tail(3), gdf[-3:])
check_frame_series_equality(gdf.tail(-2), gdf[2:])
check_frame_series_equality(gdf.tail(0), gdf[0:0])
check_frame_series_equality(gdf["a"].tail(), gdf["a"][-5:])
check_frame_series_equality(gdf["a"].tail(3), gdf["a"][-3:])
check_frame_series_equality(gdf["a"].tail(-2), gdf["a"][2:])
def test_tail_for_string():
gdf = cudf.DataFrame()
gdf["id"] = cudf.Series(["a", "b"], dtype=np.object_)
gdf["v"] = cudf.Series([1, 2])
assert_eq(gdf.tail(3), gdf.to_pandas().tail(3))
@pytest.mark.parametrize("drop", [True, False])
def test_reset_index(pdf, gdf, drop):
assert_eq(
pdf.reset_index(drop=drop, inplace=False),
gdf.reset_index(drop=drop, inplace=False),
)
assert_eq(
pdf.x.reset_index(drop=drop, inplace=False),
gdf.x.reset_index(drop=drop, inplace=False),
)
@pytest.mark.parametrize("drop", [True, False])
def test_reset_named_index(pdf, gdf, drop):
pdf.index.name = "cudf"
gdf.index.name = "cudf"
assert_eq(
pdf.reset_index(drop=drop, inplace=False),
gdf.reset_index(drop=drop, inplace=False),
)
assert_eq(
pdf.x.reset_index(drop=drop, inplace=False),
gdf.x.reset_index(drop=drop, inplace=False),
)
@pytest.mark.parametrize("drop", [True, False])
def test_reset_index_inplace(pdf, gdf, drop):
pdf.reset_index(drop=drop, inplace=True)
gdf.reset_index(drop=drop, inplace=True)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"data",
[
{
"a": [1, 2, 3, 4, 5],
"b": ["a", "b", "c", "d", "e"],
"c": [1.0, 2.0, 3.0, 4.0, 5.0],
}
],
)
@pytest.mark.parametrize(
"index",
[
"a",
["a", "b"],
pd.CategoricalIndex(["I", "II", "III", "IV", "V"]),
pd.Series(["h", "i", "k", "l", "m"]),
["b", pd.Index(["I", "II", "III", "IV", "V"])],
["c", [11, 12, 13, 14, 15]],
pd.MultiIndex(
levels=[
["I", "II", "III", "IV", "V"],
["one", "two", "three", "four", "five"],
],
codes=[[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]],
names=["col1", "col2"],
),
pd.RangeIndex(0, 5), # corner case
[pd.Series(["h", "i", "k", "l", "m"]), pd.RangeIndex(0, 5)],
[
pd.MultiIndex(
levels=[
["I", "II", "III", "IV", "V"],
["one", "two", "three", "four", "five"],
],
codes=[[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]],
names=["col1", "col2"],
),
pd.RangeIndex(0, 5),
],
],
)
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
def test_set_index(data, index, drop, append, inplace):
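    # set_index should accept column labels, array-likes, pandas
    # Index/MultiIndex objects, and mixed lists of these, honoring
    # drop/append/inplace.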
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
expected = pdf.set_index(index, inplace=inplace, drop=drop, append=append)
actual = gdf.set_index(index, inplace=inplace, drop=drop, append=append)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"data",
[
{
"a": [1, 1, 2, 2, 5],
"b": ["a", "b", "c", "d", "e"],
"c": [1.0, 2.0, 3.0, 4.0, 5.0],
}
],
)
@pytest.mark.parametrize("index", ["a", pd.Index([1, 1, 2, 2, 3])])
@pytest.mark.parametrize("verify_integrity", [True])
@pytest.mark.xfail
def test_set_index_verify_integrity(data, index, verify_integrity):
gdf = cudf.DataFrame(data)
gdf.set_index(index, verify_integrity=verify_integrity)
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("nelem", [10, 200, 1333])
def test_set_index_multi(drop, nelem):
np.random.seed(0)
a = np.arange(nelem)
np.random.shuffle(a)
df = pd.DataFrame(
{
"a": a,
"b": np.random.randint(0, 4, size=nelem),
"c": np.random.uniform(low=0, high=4, size=nelem),
"d": np.random.choice(["green", "black", "white"], nelem),
}
)
df["e"] = df["d"].astype("category")
gdf = cudf.DataFrame.from_pandas(df)
assert_eq(gdf.set_index("a", drop=drop), gdf.set_index(["a"], drop=drop))
assert_eq(
df.set_index(["b", "c"], drop=drop),
gdf.set_index(["b", "c"], drop=drop),
)
assert_eq(
df.set_index(["d", "b"], drop=drop),
gdf.set_index(["d", "b"], drop=drop),
)
assert_eq(
df.set_index(["b", "d", "e"], drop=drop),
gdf.set_index(["b", "d", "e"], drop=drop),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_0(copy):
# TODO (ptaylor): pandas changes `int` dtype to `float64`
# when reindexing and filling new label indices with NaN
gdf = cudf.datasets.randomdata(
nrows=6,
dtypes={
"a": "category",
# 'b': int,
"c": float,
"d": str,
},
)
pdf = gdf.to_pandas()
# Validate reindex returns a copy unmodified
assert_eq(pdf.reindex(copy=True), gdf.reindex(copy=copy))
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_1(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis defaults to 0
assert_eq(pdf.reindex(index, copy=True), gdf.reindex(index, copy=copy))
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_2(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis=0
assert_eq(
pdf.reindex(index, axis=0, copy=True),
gdf.reindex(index, axis=0, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_3(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
    # Validate labels are used as columns when axis=1
assert_eq(
pdf.reindex(columns, axis=1, copy=True),
gdf.reindex(columns, axis=1, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_4(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis=0
assert_eq(
pdf.reindex(labels=index, axis=0, copy=True),
gdf.reindex(labels=index, axis=0, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_5(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as columns when axis=1
assert_eq(
pdf.reindex(labels=columns, axis=1, copy=True),
gdf.reindex(labels=columns, axis=1, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_6(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis='index'
assert_eq(
pdf.reindex(labels=index, axis="index", copy=True),
gdf.reindex(labels=index, axis="index", copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_7(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as columns when axis='columns'
assert_eq(
pdf.reindex(labels=columns, axis="columns", copy=True),
gdf.reindex(labels=columns, axis="columns", copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_8(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
    # Validate that row labels are reindexed when passed via index=
assert_eq(
pdf.reindex(index=index, copy=True),
gdf.reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_9(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
    # Validate that column names are reindexed when passed via columns=
assert_eq(
pdf.reindex(columns=columns, copy=True),
gdf.reindex(columns=columns, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_10(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
    # Validate that both row labels and column names are reindexed when
    # index= and columns= are passed together
assert_eq(
pdf.reindex(index=index, columns=columns, copy=True),
gdf.reindex(index=index, columns=columns, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_change_dtype(copy):
if PANDAS_GE_110:
kwargs = {"check_freq": False}
else:
kwargs = {}
index = pd.date_range("12/29/2009", periods=10, freq="D")
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
    # Validate that both row labels and column names are reindexed when
    # index= and columns= are passed together
assert_eq(
pdf.reindex(index=index, columns=columns, copy=True),
gdf.reindex(index=index, columns=columns, copy=copy),
**kwargs,
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_categorical_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"a": "category"})
pdf = gdf.to_pandas()
assert_eq(pdf["a"].reindex(copy=True), gdf["a"].reindex(copy=copy))
assert_eq(
pdf["a"].reindex(index, copy=True), gdf["a"].reindex(index, copy=copy)
)
assert_eq(
pdf["a"].reindex(index=index, copy=True),
gdf["a"].reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_float_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"c": float})
pdf = gdf.to_pandas()
assert_eq(pdf["c"].reindex(copy=True), gdf["c"].reindex(copy=copy))
assert_eq(
pdf["c"].reindex(index, copy=True), gdf["c"].reindex(index, copy=copy)
)
assert_eq(
pdf["c"].reindex(index=index, copy=True),
gdf["c"].reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_string_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"d": str})
pdf = gdf.to_pandas()
assert_eq(pdf["d"].reindex(copy=True), gdf["d"].reindex(copy=copy))
assert_eq(
pdf["d"].reindex(index, copy=True), gdf["d"].reindex(index, copy=copy)
)
assert_eq(
pdf["d"].reindex(index=index, copy=True),
gdf["d"].reindex(index=index, copy=copy),
)
def test_to_frame(pdf, gdf):
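    # to_frame should honor an explicit name, including non-string names
    # such as False.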
assert_eq(pdf.x.to_frame(), gdf.x.to_frame())
name = "foo"
gdf_new_name = gdf.x.to_frame(name=name)
pdf_new_name = pdf.x.to_frame(name=name)
    assert_eq(pdf_new_name, gdf_new_name)
name = False
gdf_new_name = gdf.x.to_frame(name=name)
pdf_new_name = pdf.x.to_frame(name=name)
assert_eq(gdf_new_name, pdf_new_name)
assert gdf_new_name.columns[0] is name
def test_dataframe_empty_sort_index():
pdf = pd.DataFrame({"x": []})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.sort_index()
got = gdf.sort_index()
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [0, 1, "index", "columns"])
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("ignore_index", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_dataframe_sort_index(
axis, ascending, inplace, ignore_index, na_position
):
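    # sort_index should match pandas across axis, ordering, NA placement,
    # ignore_index, and inplace combinations.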
pdf = pd.DataFrame(
{"b": [1, 3, 2], "a": [1, 4, 3], "c": [4, 1, 5]},
index=[3.0, 1.0, np.nan],
)
gdf = cudf.DataFrame.from_pandas(pdf)
expected = pdf.sort_index(
axis=axis,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
got = gdf.sort_index(
axis=axis,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
if inplace is True:
assert_eq(pdf, gdf)
else:
assert_eq(expected, got)
@pytest.mark.parametrize("axis", [0, 1, "index", "columns"])
@pytest.mark.parametrize(
"level",
[
0,
"b",
1,
["b"],
"a",
["a", "b"],
["b", "a"],
[0, 1],
[1, 0],
[0, 2],
None,
],
)
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("ignore_index", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_dataframe_multiindex_sort_index(
axis, level, ascending, inplace, ignore_index, na_position
):
pdf = pd.DataFrame(
{
"b": [1.0, 3.0, np.nan],
"a": [1, 4, 3],
1: ["a", "b", "c"],
"e": [3, 1, 4],
"d": [1, 2, 8],
}
).set_index(["b", "a", 1])
gdf = cudf.DataFrame.from_pandas(pdf)
    # pandas' sort_index gained ignore_index in 1.0, so it is not passed here;
    # it is applied manually via reset_index below.
expected = pdf.sort_index(
axis=axis,
level=level,
ascending=ascending,
inplace=inplace,
na_position=na_position,
)
got = gdf.sort_index(
axis=axis,
level=level,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
if inplace is True:
if ignore_index is True:
pdf = pdf.reset_index(drop=True)
assert_eq(pdf, gdf)
else:
if ignore_index is True:
expected = expected.reset_index(drop=True)
assert_eq(expected, got)
@pytest.mark.parametrize("dtype", dtypes + ["category"])
def test_dataframe_0_row_dtype(dtype):
if dtype == "category":
data = pd.Series(["a", "b", "c", "d", "e"], dtype="category")
else:
data = np.array([1, 2, 3, 4, 5], dtype=dtype)
expect = cudf.DataFrame()
expect["x"] = data
expect["y"] = data
got = expect.head(0)
for col_name in got.columns:
assert expect[col_name].dtype == got[col_name].dtype
expect = cudf.Series(data)
got = expect.head(0)
assert expect.dtype == got.dtype
@pytest.mark.parametrize("nan_as_null", [True, False])
def test_series_list_nanasnull(nan_as_null):
data = [1.0, 2.0, 3.0, np.nan, None]
expect = pa.array(data, from_pandas=nan_as_null)
got = cudf.Series(data, nan_as_null=nan_as_null).to_arrow()
# Bug in Arrow 0.14.1 where NaNs aren't handled
expect = expect.cast("int64", safe=False)
got = got.cast("int64", safe=False)
assert pa.Array.equals(expect, got)
def test_column_assignment():
gdf = cudf.datasets.randomdata(
nrows=20, dtypes={"a": "category", "b": int, "c": float}
)
new_cols = ["q", "r", "s"]
gdf.columns = new_cols
assert list(gdf.columns) == new_cols
def test_select_dtype():
gdf = cudf.datasets.randomdata(
nrows=20, dtypes={"a": "category", "b": int, "c": float, "d": str}
)
pdf = gdf.to_pandas()
assert_eq(pdf.select_dtypes("float64"), gdf.select_dtypes("float64"))
assert_eq(pdf.select_dtypes(np.float64), gdf.select_dtypes(np.float64))
assert_eq(
pdf.select_dtypes(include=["float64"]),
gdf.select_dtypes(include=["float64"]),
)
assert_eq(
pdf.select_dtypes(include=["object", "int", "category"]),
gdf.select_dtypes(include=["object", "int", "category"]),
)
assert_eq(
pdf.select_dtypes(include=["int64", "float64"]),
gdf.select_dtypes(include=["int64", "float64"]),
)
assert_eq(
pdf.select_dtypes(include=np.number),
gdf.select_dtypes(include=np.number),
)
assert_eq(
pdf.select_dtypes(include=[np.int64, np.float64]),
gdf.select_dtypes(include=[np.int64, np.float64]),
)
assert_eq(
pdf.select_dtypes(include=["category"]),
gdf.select_dtypes(include=["category"]),
)
assert_eq(
pdf.select_dtypes(exclude=np.number),
gdf.select_dtypes(exclude=np.number),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes,
rfunc=gdf.select_dtypes,
lfunc_args_and_kwargs=([], {"includes": ["Foo"]}),
rfunc_args_and_kwargs=([], {"includes": ["Foo"]}),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes,
rfunc=gdf.select_dtypes,
lfunc_args_and_kwargs=(
[],
{"exclude": np.number, "include": np.number},
),
rfunc_args_and_kwargs=(
[],
{"exclude": np.number, "include": np.number},
),
)
gdf = cudf.DataFrame(
{"A": [3, 4, 5], "C": [1, 2, 3], "D": ["a", "b", "c"]}
)
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(include=["object", "int", "category"]),
gdf.select_dtypes(include=["object", "int", "category"]),
)
assert_eq(
pdf.select_dtypes(include=["object"], exclude=["category"]),
gdf.select_dtypes(include=["object"], exclude=["category"]),
)
gdf = cudf.DataFrame({"a": range(10), "b": range(10, 20)})
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(include=["category"]),
gdf.select_dtypes(include=["category"]),
)
assert_eq(
pdf.select_dtypes(include=["float"]),
gdf.select_dtypes(include=["float"]),
)
assert_eq(
pdf.select_dtypes(include=["object"]),
gdf.select_dtypes(include=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"]), gdf.select_dtypes(include=["int"])
)
assert_eq(
pdf.select_dtypes(exclude=["float"]),
gdf.select_dtypes(exclude=["float"]),
)
assert_eq(
pdf.select_dtypes(exclude=["object"]),
gdf.select_dtypes(exclude=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"], exclude=["object"]),
gdf.select_dtypes(include=["int"], exclude=["object"]),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes, rfunc=gdf.select_dtypes,
)
gdf = cudf.DataFrame(
{"a": cudf.Series([], dtype="int"), "b": cudf.Series([], dtype="str")}
)
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(exclude=["object"]),
gdf.select_dtypes(exclude=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"], exclude=["object"]),
gdf.select_dtypes(include=["int"], exclude=["object"]),
)
def test_select_dtype_datetime():
gdf = cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={"x": int}
)
gdf = gdf.reset_index()
pdf = gdf.to_pandas()
assert_eq(pdf.select_dtypes("datetime64"), gdf.select_dtypes("datetime64"))
assert_eq(
pdf.select_dtypes(np.dtype("datetime64")),
gdf.select_dtypes(np.dtype("datetime64")),
)
assert_eq(
pdf.select_dtypes(include="datetime64"),
gdf.select_dtypes(include="datetime64"),
)
def test_select_dtype_datetime_with_frequency():
gdf = cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={"x": int}
)
gdf = gdf.reset_index()
pdf = gdf.to_pandas()
assert_exceptions_equal(
pdf.select_dtypes,
gdf.select_dtypes,
(["datetime64[ms]"],),
(["datetime64[ms]"],),
)
def test_array_ufunc():
gdf = cudf.DataFrame({"x": [2, 3, 4.0], "y": [9.0, 2.5, 1.1]})
pdf = gdf.to_pandas()
assert_eq(np.sqrt(gdf), np.sqrt(pdf))
assert_eq(np.sqrt(gdf.x), np.sqrt(pdf.x))
@pytest.mark.parametrize("nan_value", [-5, -5.0, 0, 5, 5.0, None, "pandas"])
def test_series_to_gpu_array(nan_value):
s = cudf.Series([0, 1, None, 3])
np.testing.assert_array_equal(
s.to_array(nan_value), s.to_gpu_array(nan_value).copy_to_host()
)
def test_dataframe_describe_exclude():
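    # 'x' is cast to int64 below so that exclude=["float"] keeps it while
    # dropping 'y'.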
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(exclude=["float"])
pdf_results = pdf.describe(exclude=["float"])
assert_eq(gdf_results, pdf_results)
def test_dataframe_describe_include():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(include=["int"])
pdf_results = pdf.describe(include=["int"])
assert_eq(gdf_results, pdf_results)
def test_dataframe_describe_default():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe()
pdf_results = pdf.describe()
assert_eq(pdf_results, gdf_results)
def test_series_describe_include_all():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
df["animal"] = np.random.choice(["dog", "cat", "bird"], data_length)
pdf = df.to_pandas()
gdf_results = df.describe(include="all")
pdf_results = pdf.describe(include="all")
assert_eq(gdf_results[["x", "y"]], pdf_results[["x", "y"]])
assert_eq(gdf_results.index, pdf_results.index)
assert_eq(gdf_results.columns, pdf_results.columns)
assert_eq(
gdf_results[["animal"]].fillna(-1).astype("str"),
pdf_results[["animal"]].fillna(-1).astype("str"),
)
def test_dataframe_describe_percentiles():
np.random.seed(12)
data_length = 10000
sample_percentiles = [0.0, 0.1, 0.33, 0.84, 0.4, 0.99]
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(percentiles=sample_percentiles)
pdf_results = pdf.describe(percentiles=sample_percentiles)
assert_eq(pdf_results, gdf_results)
def test_get_numeric_data():
pdf = pd.DataFrame(
{"x": [1, 2, 3], "y": [1.0, 2.0, 3.0], "z": ["a", "b", "c"]}
)
gdf = cudf.from_pandas(pdf)
assert_eq(pdf._get_numeric_data(), gdf._get_numeric_data())
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("period", [-1, -5, -10, -20, 0, 1, 5, 10, 20])
@pytest.mark.parametrize("data_empty", [False, True])
def test_shift(dtype, period, data_empty):
if data_empty:
data = None
else:
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, 100000, low=-2, high=2)
else:
data = gen_rand(dtype, 100000)
gdf = cudf.DataFrame({"a": cudf.Series(data, dtype=dtype)})
pdf = pd.DataFrame({"a": pd.Series(data, dtype=dtype)})
shifted_outcome = gdf.a.shift(period).fillna(0)
expected_outcome = pdf.a.shift(period).fillna(0).astype(dtype)
if data_empty:
assert_eq(shifted_outcome, expected_outcome, check_index_type=False)
else:
assert_eq(shifted_outcome, expected_outcome)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("period", [-1, -5, -10, -20, 0, 1, 5, 10, 20])
@pytest.mark.parametrize("data_empty", [False, True])
def test_diff(dtype, period, data_empty):
if data_empty:
data = None
else:
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, 100000, low=-2, high=2)
else:
data = gen_rand(dtype, 100000)
gdf = cudf.DataFrame({"a": cudf.Series(data, dtype=dtype)})
pdf = pd.DataFrame({"a": pd.Series(data, dtype=dtype)})
expected_outcome = pdf.a.diff(period)
diffed_outcome = gdf.a.diff(period).astype(expected_outcome.dtype)
if data_empty:
assert_eq(diffed_outcome, expected_outcome, check_index_type=False)
else:
assert_eq(diffed_outcome, expected_outcome)
@pytest.mark.parametrize("df", _dataframe_na_data())
@pytest.mark.parametrize("nan_as_null", [True, False, None])
def test_dataframe_isnull_isna(df, nan_as_null):
gdf = cudf.DataFrame.from_pandas(df, nan_as_null=nan_as_null)
assert_eq(df.isnull(), gdf.isnull())
assert_eq(df.isna(), gdf.isna())
# Test individual columns
for col in df:
assert_eq(df[col].isnull(), gdf[col].isnull())
assert_eq(df[col].isna(), gdf[col].isna())
@pytest.mark.parametrize("df", _dataframe_na_data())
@pytest.mark.parametrize("nan_as_null", [True, False, None])
def test_dataframe_notna_notnull(df, nan_as_null):
gdf = cudf.DataFrame.from_pandas(df, nan_as_null=nan_as_null)
assert_eq(df.notnull(), gdf.notnull())
assert_eq(df.notna(), gdf.notna())
# Test individual columns
for col in df:
assert_eq(df[col].notnull(), gdf[col].notnull())
assert_eq(df[col].notna(), gdf[col].notna())
def test_ndim():
pdf = pd.DataFrame({"x": range(5), "y": range(5, 10)})
gdf = cudf.DataFrame.from_pandas(pdf)
assert pdf.ndim == gdf.ndim
assert pdf.x.ndim == gdf.x.ndim
s = pd.Series(dtype="float64")
gs = cudf.Series()
assert s.ndim == gs.ndim
@pytest.mark.parametrize(
"decimals",
[
-3,
0,
5,
pd.Series([1, 4, 3, -6], index=["w", "x", "y", "z"]),
cudf.Series([-4, -2, 12], index=["x", "y", "z"]),
{"w": -1, "x": 15, "y": 2},
],
)
def test_dataframe_round(decimals):
pdf = pd.DataFrame(
{
"w": np.arange(0.5, 10.5, 1),
"x": np.random.normal(-100, 100, 10),
"y": np.array(
[
14.123,
2.343,
np.nan,
0.0,
-8.302,
np.nan,
94.313,
-112.236,
-8.029,
np.nan,
]
),
"z": np.repeat([-0.6459412758761901], 10),
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
if isinstance(decimals, cudf.Series):
pdecimals = decimals.to_pandas()
else:
pdecimals = decimals
result = gdf.round(decimals)
expected = pdf.round(pdecimals)
assert_eq(result, expected)
# with nulls, maintaining existing null mask
for c in pdf.columns:
arr = pdf[c].to_numpy().astype("float64") # for pandas nulls
arr.ravel()[np.random.choice(10, 5, replace=False)] = np.nan
pdf[c] = gdf[c] = arr
result = gdf.round(decimals)
expected = pdf.round(pdecimals)
assert_eq(result, expected)
    # Rounding should not alter the null masks.
    for c in gdf.columns:
        assert np.array_equal(
            gdf[c].nullmask.to_array(), result[c].nullmask.to_array()
        )
@pytest.mark.parametrize(
"data",
[
[0, 1, 2, 3],
[-2, -1, 2, 3, 5],
[-2, -1, 0, 3, 5],
[True, False, False],
[True],
[False],
[],
[True, None, False],
[True, True, None],
[None, None],
[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]],
[[1, True], [2, False], [3, False]],
pytest.param(
[["a", True], ["b", False], ["c", False]],
marks=[
pytest.mark.xfail(
reason="NotImplementedError: all does not "
"support columns of object dtype."
)
],
),
],
)
def test_all(data):
    # Pandas treats `None` in object-dtype columns as True, so replace it
    # with `False` to keep the comparison with cudf meaningful.
if np.array(data).ndim <= 1:
pdata = cudf.utils.utils._create_pandas_series(data=data).replace(
[None], False
)
gdata = cudf.Series.from_pandas(pdata)
else:
pdata = pd.DataFrame(data, columns=["a", "b"]).replace([None], False)
gdata = cudf.DataFrame.from_pandas(pdata)
# test bool_only
if pdata["b"].dtype == "bool":
got = gdata.all(bool_only=True)
expected = pdata.all(bool_only=True)
assert_eq(got, expected)
else:
with pytest.raises(NotImplementedError):
gdata.all(bool_only=False)
with pytest.raises(NotImplementedError):
gdata.all(level="a")
got = gdata.all()
expected = pdata.all()
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[0, 1, 2, 3],
[-2, -1, 2, 3, 5],
[-2, -1, 0, 3, 5],
[0, 0, 0, 0, 0],
[0, 0, None, 0],
[True, False, False],
[True],
[False],
[],
[True, None, False],
[True, True, None],
[None, None],
[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]],
[[1, True], [2, False], [3, False]],
pytest.param(
[["a", True], ["b", False], ["c", False]],
marks=[
pytest.mark.xfail(
reason="NotImplementedError: any does not "
"support columns of object dtype."
)
],
),
],
)
@pytest.mark.parametrize("axis", [0, 1])
def test_any(data, axis):
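    # Series.any(axis=1) is unsupported in cudf and should raise;
    # DataFrame results are compared against pandas.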
if np.array(data).ndim <= 1:
pdata = cudf.utils.utils._create_pandas_series(data=data)
gdata = cudf.Series.from_pandas(pdata)
if axis == 1:
with pytest.raises(NotImplementedError):
gdata.any(axis=axis)
else:
got = gdata.any(axis=axis)
expected = pdata.any(axis=axis)
assert_eq(got, expected)
else:
pdata = pd.DataFrame(data, columns=["a", "b"])
gdata = cudf.DataFrame.from_pandas(pdata)
# test bool_only
if pdata["b"].dtype == "bool":
got = gdata.any(bool_only=True)
expected = pdata.any(bool_only=True)
assert_eq(got, expected)
else:
with pytest.raises(NotImplementedError):
gdata.any(bool_only=False)
with pytest.raises(NotImplementedError):
gdata.any(level="a")
got = gdata.any(axis=axis)
expected = pdata.any(axis=axis)
assert_eq(got, expected)
@pytest.mark.parametrize("axis", [0, 1])
def test_empty_dataframe_any(axis):
pdf = pd.DataFrame({}, columns=["a", "b"])
gdf = cudf.DataFrame.from_pandas(pdf)
got = gdf.any(axis=axis)
expected = pdf.any(axis=axis)
assert_eq(got, expected, check_index_type=False)
@pytest.mark.parametrize("indexed", [False, True])
def test_dataframe_sizeof(indexed):
rows = int(1e6)
    index = list(range(rows)) if indexed else None
gdf = cudf.DataFrame({"A": [8] * rows, "B": [32] * rows}, index=index)
for c in gdf._data.columns:
assert gdf._index.__sizeof__() == gdf._index.__sizeof__()
cols_sizeof = sum(c.__sizeof__() for c in gdf._data.columns)
assert gdf.__sizeof__() == (gdf._index.__sizeof__() + cols_sizeof)
@pytest.mark.parametrize("a", [[], ["123"]])
@pytest.mark.parametrize("b", ["123", ["123"]])
@pytest.mark.parametrize(
"misc_data",
["123", ["123"] * 20, 123, [1, 2, 0.8, 0.9] * 50, 0.9, 0.00001],
)
@pytest.mark.parametrize("non_list_data", [123, "abc", "zyx", "rapids", 0.8])
def test_create_dataframe_cols_empty_data(a, b, misc_data, non_list_data):
expected = pd.DataFrame({"a": a})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = b
actual["b"] = b
assert_eq(actual, expected)
expected = pd.DataFrame({"a": []})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = misc_data
actual["b"] = misc_data
assert_eq(actual, expected)
expected = pd.DataFrame({"a": a})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = non_list_data
actual["b"] = non_list_data
assert_eq(actual, expected)
def test_empty_dataframe_describe():
pdf = pd.DataFrame({"a": [], "b": []})
gdf = cudf.from_pandas(pdf)
expected = pdf.describe()
actual = gdf.describe()
assert_eq(expected, actual)
def test_as_column_types():
col = column.as_column(cudf.Series([]))
assert_eq(col.dtype, np.dtype("float64"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="float64"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="float32")
assert_eq(col.dtype, np.dtype("float32"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="float32"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="str")
assert_eq(col.dtype, np.dtype("object"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="str"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="object")
assert_eq(col.dtype, np.dtype("object"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="object"))
assert_eq(pds, gds)
pds = pd.Series(np.array([1, 2, 3]), dtype="float32")
gds = cudf.Series(column.as_column(np.array([1, 2, 3]), dtype="float32"))
assert_eq(pds, gds)
pds = pd.Series([1, 2, 3], dtype="float32")
gds = cudf.Series([1, 2, 3], dtype="float32")
assert_eq(pds, gds)
pds = pd.Series([], dtype="float64")
gds = cudf.Series(column.as_column(pds))
assert_eq(pds, gds)
pds = pd.Series([1, 2, 4], dtype="int64")
gds = cudf.Series(column.as_column(cudf.Series([1, 2, 4]), dtype="int64"))
assert_eq(pds, gds)
pds = pd.Series([1.2, 18.0, 9.0], dtype="float32")
gds = cudf.Series(
column.as_column(cudf.Series([1.2, 18.0, 9.0]), dtype="float32")
)
assert_eq(pds, gds)
pds = pd.Series([1.2, 18.0, 9.0], dtype="str")
gds = cudf.Series(
column.as_column(cudf.Series([1.2, 18.0, 9.0]), dtype="str")
)
assert_eq(pds, gds)
pds = pd.Series(pd.Index(["1", "18", "9"]), dtype="int")
gds = cudf.Series(
cudf.core.index.StringIndex(["1", "18", "9"]), dtype="int"
)
assert_eq(pds, gds)
def test_one_row_head():
gdf = cudf.DataFrame({"name": ["carl"], "score": [100]}, index=[123])
pdf = gdf.to_pandas()
head_gdf = gdf.head()
head_pdf = pdf.head()
assert_eq(head_pdf, head_gdf)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", NUMERIC_TYPES)
def test_series_astype_numeric_to_numeric(dtype, as_dtype):
psr = pd.Series([1, 2, 4, 3], dtype=dtype)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", NUMERIC_TYPES)
def test_series_astype_numeric_to_numeric_nulls(dtype, as_dtype):
data = [1, 2, None, 3]
sr = cudf.Series(data, dtype=dtype)
got = sr.astype(as_dtype)
expect = cudf.Series([1, 2, None, 3], dtype=as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize(
"as_dtype",
[
"str",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_numeric_to_other(dtype, as_dtype):
psr = pd.Series([1, 2, 3], dtype=dtype)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"as_dtype",
[
"str",
"int32",
"uint32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_string_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05"]
else:
data = ["1", "2", "3"]
psr = pd.Series(data)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"as_dtype",
[
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_datetime_to_other(as_dtype):
data = ["2001-01-01", "2002-02-02", "2001-01-05"]
psr = pd.Series(data)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"inp",
[
("datetime64[ns]", "2011-01-01 00:00:00.000000000"),
("datetime64[us]", "2011-01-01 00:00:00.000000"),
("datetime64[ms]", "2011-01-01 00:00:00.000"),
("datetime64[s]", "2011-01-01 00:00:00"),
],
)
def test_series_astype_datetime_to_string(inp):
dtype, expect = inp
base_date = "2011-01-01"
sr = cudf.Series([base_date], dtype=dtype)
got = sr.astype(str)[0]
assert expect == got
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"uint32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
"str",
],
)
def test_series_astype_categorical_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05", "2001-01-01"]
else:
data = [1, 2, 3, 1]
psr = pd.Series(data, dtype="category")
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize("ordered", [True, False])
def test_series_astype_to_categorical_ordered(ordered):
psr = pd.Series([1, 2, 3, 1], dtype="category")
gsr = cudf.from_pandas(psr)
ordered_dtype_pd = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=ordered
)
ordered_dtype_gd = cudf.CategoricalDtype.from_pandas(ordered_dtype_pd)
assert_eq(
psr.astype("int32").astype(ordered_dtype_pd).astype("int32"),
gsr.astype("int32").astype(ordered_dtype_gd).astype("int32"),
)
@pytest.mark.parametrize("ordered", [True, False])
def test_series_astype_cat_ordered_to_unordered(ordered):
pd_dtype = pd.CategoricalDtype(categories=[1, 2, 3], ordered=ordered)
pd_to_dtype = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=not ordered
)
gd_dtype = cudf.CategoricalDtype.from_pandas(pd_dtype)
gd_to_dtype = cudf.CategoricalDtype.from_pandas(pd_to_dtype)
psr = pd.Series([1, 2, 3], dtype=pd_dtype)
gsr = cudf.Series([1, 2, 3], dtype=gd_dtype)
expect = psr.astype(pd_to_dtype)
got = gsr.astype(gd_to_dtype)
assert_eq(expect, got)
def test_series_astype_null_cases():
data = [1, 2, None, 3]
# numerical to other
assert_eq(cudf.Series(data, dtype="str"), cudf.Series(data).astype("str"))
assert_eq(
cudf.Series(data, dtype="category"),
cudf.Series(data).astype("category"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="int32").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="uint32").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="datetime64[ms]"),
cudf.Series(data).astype("datetime64[ms]"),
)
# categorical to other
assert_eq(
cudf.Series(data, dtype="str"),
cudf.Series(data, dtype="category").astype("str"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="category").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="datetime64[ms]"),
cudf.Series(data, dtype="category").astype("datetime64[ms]"),
)
# string to other
assert_eq(
cudf.Series([1, 2, None, 3], dtype="int32"),
cudf.Series(["1", "2", None, "3"]).astype("int32"),
)
assert_eq(
cudf.Series(
["2001-01-01", "2001-02-01", None, "2001-03-01"],
dtype="datetime64[ms]",
),
cudf.Series(["2001-01-01", "2001-02-01", None, "2001-03-01"]).astype(
"datetime64[ms]"
),
)
assert_eq(
cudf.Series(["a", "b", "c", None], dtype="category").to_pandas(),
cudf.Series(["a", "b", "c", None]).astype("category").to_pandas(),
)
# datetime to other
data = [
"2001-01-01 00:00:00.000000",
"2001-02-01 00:00:00.000000",
None,
"2001-03-01 00:00:00.000000",
]
assert_eq(
cudf.Series(data),
cudf.Series(data, dtype="datetime64[us]").astype("str"),
)
assert_eq(
pd.Series(data, dtype="datetime64[ns]").astype("category"),
cudf.from_pandas(pd.Series(data, dtype="datetime64[ns]")).astype(
"category"
),
)
def test_series_astype_null_categorical():
sr = cudf.Series([None, None, None], dtype="category")
expect = cudf.Series([None, None, None], dtype="int32")
got = sr.astype("int32")
assert_eq(expect, got)
@pytest.mark.parametrize(
"data",
[
(
pd.Series([3, 3.0]),
pd.Series([2.3, 3.9]),
pd.Series([1.5, 3.9]),
pd.Series([1.0, 2]),
),
[
pd.Series([3, 3.0]),
pd.Series([2.3, 3.9]),
pd.Series([1.5, 3.9]),
pd.Series([1.0, 2]),
],
],
)
def test_create_dataframe_from_list_like(data):
pdf = pd.DataFrame(data, index=["count", "mean", "std", "min"])
gdf = cudf.DataFrame(data, index=["count", "mean", "std", "min"])
assert_eq(pdf, gdf)
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def test_create_dataframe_column():
pdf = pd.DataFrame(columns=["a", "b", "c"], index=["A", "Z", "X"])
gdf = cudf.DataFrame(columns=["a", "b", "c"], index=["A", "Z", "X"])
assert_eq(pdf, gdf)
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": [2, 3, 5]},
columns=["a", "b", "c"],
index=["A", "Z", "X"],
)
gdf = cudf.DataFrame(
{"a": [1, 2, 3], "b": [2, 3, 5]},
columns=["a", "b", "c"],
index=["A", "Z", "X"],
)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"data",
[
[1, 2, 4],
[],
[5.0, 7.0, 8.0],
pd.Categorical(["a", "b", "c"]),
["m", "a", "d", "v"],
],
)
def test_series_values_host_property(data):
pds = cudf.utils.utils._create_pandas_series(data=data)
gds = cudf.Series(data)
np.testing.assert_array_equal(pds.values, gds.values_host)
@pytest.mark.parametrize(
"data",
[
[1, 2, 4],
[],
[5.0, 7.0, 8.0],
pytest.param(
pd.Categorical(["a", "b", "c"]),
marks=pytest.mark.xfail(raises=NotImplementedError),
),
pytest.param(
["m", "a", "d", "v"],
marks=pytest.mark.xfail(raises=NotImplementedError),
),
],
)
def test_series_values_property(data):
pds = cudf.utils.utils._create_pandas_series(data=data)
gds = cudf.Series(data)
gds_vals = gds.values
assert isinstance(gds_vals, cupy.ndarray)
np.testing.assert_array_equal(gds_vals.get(), pds.values)
@pytest.mark.parametrize(
"data",
[
{"A": [1, 2, 3], "B": [4, 5, 6]},
{"A": [1.0, 2.0, 3.0], "B": [4.0, 5.0, 6.0]},
{"A": [1, 2, 3], "B": [1.0, 2.0, 3.0]},
{"A": np.float32(np.arange(3)), "B": np.float64(np.arange(3))},
pytest.param(
{"A": [1, None, 3], "B": [1, 2, None]},
marks=pytest.mark.xfail(
reason="Nulls not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": [None, None, None], "B": [None, None, None]},
marks=pytest.mark.xfail(
reason="Nulls not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": [], "B": []},
marks=pytest.mark.xfail(reason="Requires at least 1 row"),
),
pytest.param(
{"A": [1, 2, 3], "B": ["a", "b", "c"]},
marks=pytest.mark.xfail(
reason="str or categorical not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": pd.Categorical(["a", "b", "c"]), "B": ["d", "e", "f"]},
marks=pytest.mark.xfail(
reason="str or categorical not supported by as_gpu_matrix"
),
),
],
)
def test_df_values_property(data):
pdf = pd.DataFrame.from_dict(data)
gdf = cudf.DataFrame.from_pandas(pdf)
pmtr = pdf.values
gmtr = gdf.values.get()
np.testing.assert_array_equal(pmtr, gmtr)
def test_value_counts():
pdf = pd.DataFrame(
{
"numeric": [1, 2, 3, 4, 5, 6, 1, 2, 4] * 10,
"alpha": ["u", "h", "d", "a", "m", "u", "h", "d", "a"] * 10,
}
)
gdf = cudf.DataFrame(
{
"numeric": [1, 2, 3, 4, 5, 6, 1, 2, 4] * 10,
"alpha": ["u", "h", "d", "a", "m", "u", "h", "d", "a"] * 10,
}
)
assert_eq(
pdf.numeric.value_counts().sort_index(),
gdf.numeric.value_counts().sort_index(),
check_dtype=False,
)
assert_eq(
pdf.alpha.value_counts().sort_index(),
gdf.alpha.value_counts().sort_index(),
check_dtype=False,
)
@pytest.mark.parametrize(
"data",
[
[],
[0, 12, 14],
[0, 14, 12, 12, 3, 10, 12, 14],
np.random.randint(-100, 100, 200),
pd.Series([0.0, 1.0, None, 10.0]),
[None, None, None, None],
[np.nan, None, -1, 2, 3],
],
)
@pytest.mark.parametrize(
"values",
[
np.random.randint(-100, 100, 10),
[],
[np.nan, None, -1, 2, 3],
[1.0, 12.0, None, None, 120],
[0, 14, 12, 12, 3, 10, 12, 14, None],
[None, None, None],
["0", "12", "14"],
["0", "12", "14", "a"],
],
)
def test_isin_numeric(data, values):
index = np.random.randint(0, 100, len(data))
psr = cudf.utils.utils._create_pandas_series(data=data, index=index)
gsr = cudf.Series.from_pandas(psr, nan_as_null=False)
expected = psr.isin(values)
got = gsr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(
["2018-01-01", "2019-04-03", None, "2019-12-30"],
dtype="datetime64[ns]",
),
pd.Series(
[
"2018-01-01",
"2019-04-03",
None,
"2019-12-30",
"2018-01-01",
"2018-01-01",
],
dtype="datetime64[ns]",
),
],
)
@pytest.mark.parametrize(
"values",
[
[],
[1514764800000000000, 1577664000000000000],
[
1514764800000000000,
1577664000000000000,
1577664000000000000,
1577664000000000000,
1514764800000000000,
],
["2019-04-03", "2019-12-30", "2012-01-01"],
[
"2012-01-01",
"2012-01-01",
"2012-01-01",
"2019-04-03",
"2019-12-30",
"2012-01-01",
],
],
)
def test_isin_datetime(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(["this", "is", None, "a", "test"]),
pd.Series(["test", "this", "test", "is", None, "test", "a", "test"]),
pd.Series(["0", "12", "14"]),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["this", "is"],
[None, None, None],
["12", "14", "19"],
pytest.param(
[12, 14, 19],
marks=pytest.mark.xfail(
not PANDAS_GE_120,
reason="pandas's failure here seems like a bug(in < 1.2) "
"given the reverse succeeds",
),
),
["is", "this", "is", "this", "is"],
],
)
def test_isin_string(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(["a", "b", "c", "c", "c", "d", "e"], dtype="category"),
pd.Series(["a", "b", None, "c", "d", "e"], dtype="category"),
pd.Series([0, 3, 10, 12], dtype="category"),
pd.Series([0, 3, 10, 12, 0, 10, 3, 0, 0, 3, 3], dtype="category"),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["a", "b", None, "f", "words"],
["0", "12", None, "14"],
[0, 10, 12, None, 39, 40, 1000],
[0, 0, 0, 0, 3, 3, 3, None, 1, 2, 3],
],
)
def test_isin_categorical(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(
["this", "is", None, "a", "test"], index=["a", "b", "c", "d", "e"]
),
pd.Series([0, 15, 10], index=[0, None, 9]),
pd.Series(
range(25),
index=pd.date_range(
start="2019-01-01", end="2019-01-02", freq="H"
),
),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["this", "is"],
[0, 19, 13],
["2019-01-01 04:00:00", "2019-01-01 06:00:00", "2018-03-02"],
],
)
def test_isin_index(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.index.isin(values)
expected = psr.index.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
pd.MultiIndex.from_arrays(
[[1, 2, 3], ["red", "blue", "green"]], names=("number", "color")
),
pd.MultiIndex.from_arrays([[], []], names=("number", "color")),
pd.MultiIndex.from_arrays(
[[1, 2, 3, 10, 100], ["red", "blue", "green", "pink", "white"]],
names=("number", "color"),
),
],
)
@pytest.mark.parametrize(
"values,level,err",
[
(["red", "orange", "yellow"], "color", None),
(["red", "white", "yellow"], "color", None),
([0, 1, 2, 10, 11, 15], "number", None),
([0, 1, 2, 10, 11, 15], None, TypeError),
(pd.Series([0, 1, 2, 10, 11, 15]), None, TypeError),
(pd.Index([0, 1, 2, 10, 11, 15]), None, TypeError),
(pd.Index([0, 1, 2, 8, 11, 15]), "number", None),
(pd.Index(["red", "white", "yellow"]), "color", None),
([(1, "red"), (3, "red")], None, None),
(((1, "red"), (3, "red")), None, None),
(
pd.MultiIndex.from_arrays(
[[1, 2, 3], ["red", "blue", "green"]],
names=("number", "color"),
),
None,
None,
),
(
pd.MultiIndex.from_arrays([[], []], names=("number", "color")),
None,
None,
),
(
pd.MultiIndex.from_arrays(
[
[1, 2, 3, 10, 100],
["red", "blue", "green", "pink", "white"],
],
names=("number", "color"),
),
None,
None,
),
],
)
def test_isin_multiindex(data, values, level, err):
pmdx = data
gmdx = cudf.from_pandas(data)
if err is None:
expected = pmdx.isin(values, level=level)
if isinstance(values, pd.MultiIndex):
values = cudf.from_pandas(values)
got = gmdx.isin(values, level=level)
assert_eq(got, expected)
else:
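        # The expected message (including the "squences" spelling) must match
        # the error text raised by cudf verbatim.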
assert_exceptions_equal(
lfunc=pmdx.isin,
rfunc=gmdx.isin,
lfunc_args_and_kwargs=([values], {"level": level}),
rfunc_args_and_kwargs=([values], {"level": level}),
check_exception_type=False,
expected_error_message=re.escape(
"values need to be a Multi-Index or set/list-like tuple "
"squences when `level=None`."
),
)
@pytest.mark.parametrize(
"data",
[
pd.DataFrame(
{
"num_legs": [2, 4],
"num_wings": [2, 0],
"bird_cats": pd.Series(
["sparrow", "pigeon"],
dtype="category",
index=["falcon", "dog"],
),
},
index=["falcon", "dog"],
),
pd.DataFrame(
{"num_legs": [8, 2], "num_wings": [0, 2]},
index=["spider", "falcon"],
),
pd.DataFrame(
{
"num_legs": [8, 2, 1, 0, 2, 4, 5],
"num_wings": [2, 0, 2, 1, 2, 4, -1],
}
),
],
)
@pytest.mark.parametrize(
"values",
[
[0, 2],
{"num_wings": [0, 3]},
pd.DataFrame(
{"num_legs": [8, 2], "num_wings": [0, 2]},
index=["spider", "falcon"],
),
pd.DataFrame(
{
"num_legs": [2, 4],
"num_wings": [2, 0],
"bird_cats": pd.Series(
["sparrow", "pigeon"],
dtype="category",
index=["falcon", "dog"],
),
},
index=["falcon", "dog"],
),
["sparrow", "pigeon"],
pd.Series(["sparrow", "pigeon"], dtype="category"),
pd.Series([1, 2, 3, 4, 5]),
"abc",
123,
],
)
def test_isin_dataframe(data, values):
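    # Scalar `values` should raise on both libraries; Series/DataFrame values
    # are converted to cudf before calling isin.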
pdf = data
gdf = cudf.from_pandas(pdf)
if cudf.utils.dtypes.is_scalar(values):
assert_exceptions_equal(
lfunc=pdf.isin,
rfunc=gdf.isin,
lfunc_args_and_kwargs=([values],),
rfunc_args_and_kwargs=([values],),
)
else:
try:
expected = pdf.isin(values)
except ValueError as e:
if str(e) == "Lengths must match.":
pytest.xfail(
not PANDAS_GE_110,
"https://github.com/pandas-dev/pandas/issues/34256",
)
if isinstance(values, (pd.DataFrame, pd.Series)):
values = cudf.from_pandas(values)
got = gdf.isin(values)
assert_eq(got, expected)
def test_constructor_properties():
df = cudf.DataFrame()
key1 = "a"
key2 = "b"
val1 = np.array([123], dtype=np.float64)
val2 = np.array([321], dtype=np.float64)
df[key1] = val1
df[key2] = val2
# Correct use of _constructor (for DataFrame)
assert_eq(df, df._constructor({key1: val1, key2: val2}))
# Correct use of _constructor (for cudf.Series)
assert_eq(df[key1], df[key2]._constructor(val1, name=key1))
# Correct use of _constructor_sliced (for DataFrame)
assert_eq(df[key1], df._constructor_sliced(val1, name=key1))
# Correct use of _constructor_expanddim (for cudf.Series)
assert_eq(df, df[key2]._constructor_expanddim({key1: val1, key2: val2}))
# Incorrect use of _constructor_sliced (Raises for cudf.Series)
with pytest.raises(NotImplementedError):
df[key1]._constructor_sliced
# Incorrect use of _constructor_expanddim (Raises for DataFrame)
with pytest.raises(NotImplementedError):
df._constructor_expanddim
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", ALL_TYPES)
def test_df_astype_numeric_to_all(dtype, as_dtype):
if "uint" in dtype:
data = [1, 2, None, 4, 7]
elif "int" in dtype or "longlong" in dtype:
data = [1, 2, None, 4, -7]
elif "float" in dtype:
data = [1.0, 2.0, None, 4.0, np.nan, -7.0]
gdf = cudf.DataFrame()
gdf["foo"] = cudf.Series(data, dtype=dtype)
gdf["bar"] = cudf.Series(data, dtype=dtype)
insert_data = cudf.Series(data, dtype=dtype)
expect = cudf.DataFrame()
expect["foo"] = insert_data.astype(as_dtype)
expect["bar"] = insert_data.astype(as_dtype)
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_df_astype_string_to_other(as_dtype):
if "datetime64" in as_dtype:
# change None to "NaT" after this issue is fixed:
# https://github.com/rapidsai/cudf/issues/5117
data = ["2001-01-01", "2002-02-02", "2000-01-05", None]
elif as_dtype == "int32":
data = [1, 2, 3]
elif as_dtype == "category":
data = ["1", "2", "3", None]
elif "float" in as_dtype:
data = [1.0, 2.0, 3.0, np.nan]
insert_data = cudf.Series.from_pandas(pd.Series(data, dtype="str"))
expect_data = cudf.Series(data, dtype=as_dtype)
gdf = cudf.DataFrame()
expect = cudf.DataFrame()
gdf["foo"] = insert_data
gdf["bar"] = insert_data
expect["foo"] = expect_data
expect["bar"] = expect_data
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int64",
"datetime64[s]",
"datetime64[us]",
"datetime64[ns]",
"str",
"category",
],
)
def test_df_astype_datetime_to_other(as_dtype):
data = [
"1991-11-20 00:00:00.000",
"2004-12-04 00:00:00.000",
"2016-09-13 00:00:00.000",
None,
]
gdf = cudf.DataFrame()
expect = cudf.DataFrame()
gdf["foo"] = cudf.Series(data, dtype="datetime64[ms]")
gdf["bar"] = cudf.Series(data, dtype="datetime64[ms]")
if as_dtype == "int64":
expect["foo"] = cudf.Series(
[690595200000, 1102118400000, 1473724800000, None], dtype="int64"
)
expect["bar"] = cudf.Series(
[690595200000, 1102118400000, 1473724800000, None], dtype="int64"
)
elif as_dtype == "str":
expect["foo"] = cudf.Series(data, dtype="str")
expect["bar"] = cudf.Series(data, dtype="str")
elif as_dtype == "category":
expect["foo"] = cudf.Series(gdf["foo"], dtype="category")
expect["bar"] = cudf.Series(gdf["bar"], dtype="category")
else:
expect["foo"] = cudf.Series(data, dtype=as_dtype)
expect["bar"] = cudf.Series(data, dtype=as_dtype)
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
"str",
],
)
def test_df_astype_categorical_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05", "2001-01-01"]
else:
data = [1, 2, 3, 1]
psr = pd.Series(data, dtype="category")
pdf = pd.DataFrame()
pdf["foo"] = psr
pdf["bar"] = psr
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(pdf.astype(as_dtype), gdf.astype(as_dtype))
@pytest.mark.parametrize("ordered", [True, False])
def test_df_astype_to_categorical_ordered(ordered):
psr = pd.Series([1, 2, 3, 1], dtype="category")
pdf = pd.DataFrame()
pdf["foo"] = psr
pdf["bar"] = psr
gdf = cudf.DataFrame.from_pandas(pdf)
ordered_dtype_pd = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=ordered
)
ordered_dtype_gd = cudf.CategoricalDtype.from_pandas(ordered_dtype_pd)
assert_eq(
pdf.astype(ordered_dtype_pd).astype("int32"),
gdf.astype(ordered_dtype_gd).astype("int32"),
)
@pytest.mark.parametrize(
"dtype,args",
[(dtype, {}) for dtype in ALL_TYPES]
+ [("category", {"ordered": True}), ("category", {"ordered": False})],
)
def test_empty_df_astype(dtype, args):
df = cudf.DataFrame()
kwargs = {}
kwargs.update(args)
assert_eq(df, df.astype(dtype=dtype, **kwargs))
@pytest.mark.parametrize(
"errors",
[
pytest.param(
"raise", marks=pytest.mark.xfail(reason="should raise error here")
),
pytest.param("other", marks=pytest.mark.xfail(raises=ValueError)),
"ignore",
pytest.param(
"warn", marks=pytest.mark.filterwarnings("ignore:Traceback")
),
],
)
def test_series_astype_error_handling(errors):
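    # When the cast fails and errors is not "raise", the original data should
    # be returned unchanged.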
sr = cudf.Series(["random", "words"])
got = sr.astype("datetime64", errors=errors)
assert_eq(sr, got)
@pytest.mark.parametrize("dtype", ALL_TYPES)
def test_df_constructor_dtype(dtype):
if "datetime" in dtype:
data = ["1991-11-20", "2004-12-04", "2016-09-13", None]
elif dtype == "str":
data = ["a", "b", "c", None]
elif "float" in dtype:
data = [1.0, 0.5, -1.1, np.nan, None]
elif "bool" in dtype:
data = [True, False, None]
else:
data = [1, 2, 3, None]
sr = cudf.Series(data, dtype=dtype)
expect = cudf.DataFrame()
expect["foo"] = sr
expect["bar"] = sr
got = cudf.DataFrame({"foo": data, "bar": data}, dtype=dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"data",
[
cudf.datasets.randomdata(
nrows=10, dtypes={"a": "category", "b": int, "c": float, "d": int}
),
cudf.datasets.randomdata(
nrows=10, dtypes={"a": "category", "b": int, "c": float, "d": str}
),
cudf.datasets.randomdata(
nrows=10, dtypes={"a": bool, "b": int, "c": float, "d": str}
),
cudf.DataFrame(),
cudf.DataFrame({"a": [0, 1, 2], "b": [1, None, 3]}),
cudf.DataFrame(
{
"a": [1, 2, 3, 4],
"b": [7, np.NaN, 9, 10],
"c": [np.NaN, np.NaN, np.NaN, np.NaN],
"d": cudf.Series([None, None, None, None], dtype="int64"),
"e": [100, None, 200, None],
"f": cudf.Series([10, None, np.NaN, 11], nan_as_null=False),
}
),
cudf.DataFrame(
{
"a": [10, 11, 12, 13, 14, 15],
"b": cudf.Series(
[10, None, np.NaN, 2234, None, np.NaN], nan_as_null=False
),
}
),
],
)
@pytest.mark.parametrize(
"op", ["max", "min", "sum", "product", "mean", "var", "std"]
)
@pytest.mark.parametrize("skipna", [True, False])
def test_rowwise_ops(data, op, skipna):
gdf = data
pdf = gdf.to_pandas()
if op in ("var", "std"):
expected = getattr(pdf, op)(axis=1, ddof=0, skipna=skipna)
got = getattr(gdf, op)(axis=1, ddof=0, skipna=skipna)
else:
expected = getattr(pdf, op)(axis=1, skipna=skipna)
got = getattr(gdf, op)(axis=1, skipna=skipna)
assert_eq(expected, got, check_exact=False)
@pytest.mark.parametrize(
"op", ["max", "min", "sum", "product", "mean", "var", "std"]
)
def test_rowwise_ops_nullable_dtypes_all_null(op):
gdf = cudf.DataFrame(
{
"a": [1, 2, 3, 4],
"b": [7, np.NaN, 9, 10],
"c": [np.NaN, np.NaN, np.NaN, np.NaN],
"d": cudf.Series([None, None, None, None], dtype="int64"),
"e": [100, None, 200, None],
"f": cudf.Series([10, None, np.NaN, 11], nan_as_null=False),
}
)
expected = cudf.Series([None, None, None, None], dtype="float64")
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"op,expected",
[
(
"max",
cudf.Series(
[10.0, None, np.NaN, 2234.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"min",
cudf.Series(
[10.0, None, np.NaN, 13.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"sum",
cudf.Series(
[20.0, None, np.NaN, 2247.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"product",
cudf.Series(
[100.0, None, np.NaN, 29042.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"mean",
cudf.Series(
[10.0, None, np.NaN, 1123.5, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"var",
cudf.Series(
[0.0, None, np.NaN, 1233210.25, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"std",
cudf.Series(
[0.0, None, np.NaN, 1110.5, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
],
)
def test_rowwise_ops_nullable_dtypes_partial_null(op, expected):
gdf = cudf.DataFrame(
{
"a": [10, 11, 12, 13, 14, 15],
"b": cudf.Series(
[10, None, np.NaN, 2234, None, np.NaN], nan_as_null=False,
),
}
)
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"op,expected",
[
(
"max",
cudf.Series([10, None, None, 2234, None, 453], dtype="int64",),
),
("min", cudf.Series([10, None, None, 13, None, 15], dtype="int64",),),
(
"sum",
cudf.Series([20, None, None, 2247, None, 468], dtype="int64",),
),
(
"product",
cudf.Series([100, None, None, 29042, None, 6795], dtype="int64",),
),
(
"mean",
cudf.Series(
[10.0, None, None, 1123.5, None, 234.0], dtype="float32",
),
),
(
"var",
cudf.Series(
[0.0, None, None, 1233210.25, None, 47961.0], dtype="float32",
),
),
(
"std",
cudf.Series(
[0.0, None, None, 1110.5, None, 219.0], dtype="float32",
),
),
],
)
def test_rowwise_ops_nullable_int_dtypes(op, expected):
gdf = cudf.DataFrame(
{
"a": [10, 11, None, 13, None, 15],
"b": cudf.Series(
[10, None, 323, 2234, None, 453], nan_as_null=False,
),
}
)
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ms]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ns]"
),
"t3": cudf.Series(
["1960-08-31 06:00:00", "2030-08-02 10:00:00"], dtype="<M8[s]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[us]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(["1940-08-31 06:00:00", None], dtype="<M8[ms]"),
"i1": cudf.Series([1001, 2002], dtype="int64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
"f1": cudf.Series([-100.001, 123.456], dtype="float64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
"f1": cudf.Series([-100.001, 123.456], dtype="float64"),
"b1": cudf.Series([True, False], dtype="bool"),
},
],
)
@pytest.mark.parametrize("op", ["max", "min"])
@pytest.mark.parametrize("skipna", [True, False])
def test_rowwise_ops_datetime_dtypes(data, op, skipna):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
got = getattr(gdf, op)(axis=1, skipna=skipna)
expected = getattr(pdf, op)(axis=1, skipna=skipna)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data,op,skipna",
[
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"max",
True,
),
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"min",
False,
),
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"min",
True,
),
],
)
def test_rowwise_ops_datetime_dtypes_2(data, op, skipna):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
got = getattr(gdf, op)(axis=1, skipna=skipna)
expected = getattr(pdf, op)(axis=1, skipna=skipna)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
(
{
"t1": pd.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ns]",
),
"t2": pd.Series(
["1940-08-31 06:00:00", pd.NaT], dtype="<M8[ns]"
),
}
)
],
)
def test_rowwise_ops_datetime_dtypes_pdbug(data):
pdf = pd.DataFrame(data)
gdf = cudf.from_pandas(pdf)
expected = pdf.max(axis=1, skipna=False)
got = gdf.max(axis=1, skipna=False)
if PANDAS_GE_120:
assert_eq(got, expected)
else:
# PANDAS BUG: https://github.com/pandas-dev/pandas/issues/36907
with pytest.raises(AssertionError, match="numpy array are different"):
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[5.0, 6.0, 7.0],
"single value",
np.array(1, dtype="int64"),
np.array(0.6273643, dtype="float64"),
],
)
def test_insert(data):
pdf = pd.DataFrame.from_dict({"A": [1, 2, 3], "B": ["a", "b", "c"]})
gdf = cudf.DataFrame.from_pandas(pdf)
# insertion by index
pdf.insert(0, "foo", data)
gdf.insert(0, "foo", data)
assert_eq(pdf, gdf)
pdf.insert(3, "bar", data)
gdf.insert(3, "bar", data)
assert_eq(pdf, gdf)
pdf.insert(1, "baz", data)
gdf.insert(1, "baz", data)
assert_eq(pdf, gdf)
    # pandas insert doesn't support negative indexing, so append at the end
    # explicitly there while exercising -1 on the cudf side
pdf.insert(len(pdf.columns), "qux", data)
gdf.insert(-1, "qux", data)
assert_eq(pdf, gdf)
def test_cov():
gdf = cudf.datasets.randomdata(10)
pdf = gdf.to_pandas()
assert_eq(pdf.cov(), gdf.cov())
@pytest.mark.xfail(reason="cupy-based cov does not support nulls")
def test_cov_nans():
pdf = pd.DataFrame()
pdf["a"] = [None, None, None, 2.00758632, None]
pdf["b"] = [0.36403686, None, None, None, None]
pdf["c"] = [None, None, None, 0.64882227, None]
pdf["d"] = [None, -1.46863125, None, 1.22477948, -0.06031689]
gdf = cudf.from_pandas(pdf)
assert_eq(pdf.cov(), gdf.cov())
@pytest.mark.parametrize(
"gsr",
[
cudf.Series([4, 2, 3]),
cudf.Series([4, 2, 3], index=["a", "b", "c"]),
cudf.Series([4, 2, 3], index=["a", "b", "d"]),
cudf.Series([4, 2], index=["a", "b"]),
cudf.Series([4, 2, 3], index=cudf.core.index.RangeIndex(0, 3)),
pytest.param(
cudf.Series([4, 2, 3, 4, 5], index=["a", "b", "d", "0", "12"]),
marks=pytest.mark.xfail,
),
],
)
@pytest.mark.parametrize("colnames", [["a", "b", "c"], [0, 1, 2]])
@pytest.mark.parametrize(
"op",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_df_sr_binop(gsr, colnames, op):
data = [[3.0, 2.0, 5.0], [3.0, None, 5.0], [6.0, 7.0, np.nan]]
data = dict(zip(colnames, data))
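    # DataFrame <op> Series aligns the Series index against the DataFrame
    # columns (pandas-style column-wise broadcasting)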
gsr = gsr.astype("float64")
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas(nullable=True)
psr = gsr.to_pandas(nullable=True)
expect = op(pdf, psr)
got = op(gdf, gsr).to_pandas(nullable=True)
assert_eq(expect, got, check_dtype=False)
expect = op(psr, pdf)
got = op(gsr, gdf).to_pandas(nullable=True)
assert_eq(expect, got, check_dtype=False)
@pytest.mark.parametrize(
"op",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
# comparison ops will temporarily XFAIL
# see PR https://github.com/rapidsai/cudf/pull/7491
pytest.param(operator.eq, marks=pytest.mark.xfail()),
pytest.param(operator.lt, marks=pytest.mark.xfail()),
pytest.param(operator.le, marks=pytest.mark.xfail()),
pytest.param(operator.gt, marks=pytest.mark.xfail()),
pytest.param(operator.ge, marks=pytest.mark.xfail()),
pytest.param(operator.ne, marks=pytest.mark.xfail()),
],
)
@pytest.mark.parametrize(
"gsr", [cudf.Series([1, 2, 3, 4, 5], index=["a", "b", "d", "0", "12"])]
)
def test_df_sr_binop_col_order(gsr, op):
colnames = [0, 1, 2]
data = [[0, 2, 5], [3, None, 5], [6, 7, np.nan]]
data = dict(zip(colnames, data))
gdf = cudf.DataFrame(data)
pdf = pd.DataFrame.from_dict(data)
psr = gsr.to_pandas()
expect = op(pdf, psr).astype("float")
out = op(gdf, gsr).astype("float")
got = out[expect.columns]
assert_eq(expect, got)
@pytest.mark.parametrize("set_index", [None, "A", "C", "D"])
@pytest.mark.parametrize("index", [True, False])
@pytest.mark.parametrize("deep", [True, False])
def test_memory_usage(deep, index, set_index):
# Testing numerical/datetime by comparing with pandas
# (string and categorical columns will be different)
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int64"),
"B": np.arange(rows, dtype="int32"),
"C": np.arange(rows, dtype="float64"),
}
)
df["D"] = pd.to_datetime(df.A)
if set_index:
df = df.set_index(set_index)
gdf = cudf.from_pandas(df)
if index and set_index is None:
# Special Case: Assume RangeIndex size == 0
assert gdf.index.memory_usage(deep=deep) == 0
else:
# Check for Series only
assert df["B"].memory_usage(index=index, deep=deep) == gdf[
"B"
].memory_usage(index=index, deep=deep)
# Check for entire DataFrame
assert_eq(
df.memory_usage(index=index, deep=deep).sort_index(),
gdf.memory_usage(index=index, deep=deep).sort_index(),
)
@pytest.mark.xfail
def test_memory_usage_string():
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(["apple", "banana", "orange"], rows),
}
)
gdf = cudf.from_pandas(df)
# Check deep=False (should match pandas)
assert gdf.B.memory_usage(deep=False, index=False) == df.B.memory_usage(
deep=False, index=False
)
# Check string column
assert gdf.B.memory_usage(deep=True, index=False) == df.B.memory_usage(
deep=True, index=False
)
# Check string index
assert gdf.set_index("B").index.memory_usage(
deep=True
) == df.B.memory_usage(deep=True, index=False)
def test_memory_usage_cat():
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(["apple", "banana", "orange"], rows),
}
)
df["B"] = df.B.astype("category")
gdf = cudf.from_pandas(df)
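    # A categorical column's footprint is the size of its categories
    # (the dictionary) plus the size of its codes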
expected = (
gdf.B._column.cat().categories.__sizeof__()
+ gdf.B._column.cat().codes.__sizeof__()
)
# Check cat column
assert gdf.B.memory_usage(deep=True, index=False) == expected
# Check cat index
assert gdf.set_index("B").index.memory_usage(deep=True) == expected
def test_memory_usage_list():
df = cudf.DataFrame({"A": [[0, 1, 2, 3], [4, 5, 6], [7, 8], [9]]})
expected = (
df.A._column.offsets._memory_usage()
+ df.A._column.elements._memory_usage()
)
assert expected == df.A.memory_usage()
@pytest.mark.xfail
def test_memory_usage_multi():
rows = int(100)
deep = True
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(np.arange(3, dtype="int64"), rows),
"C": np.random.choice(np.arange(3, dtype="float64"), rows),
}
).set_index(["B", "C"])
gdf = cudf.from_pandas(df)
# Assume MultiIndex memory footprint is just that
# of the underlying columns, levels, and codes
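    # (under that assumption: two 8-byte index columns -> 16 bytes per row for
    # both the source data and the codes; 3 unique 8-byte values per level)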
expect = rows * 16 # Source Columns
expect += rows * 16 # Codes
expect += 3 * 8 # Level 0
expect += 3 * 8 # Level 1
assert expect == gdf.index.memory_usage(deep=deep)
@pytest.mark.parametrize(
"list_input",
[
pytest.param([1, 2, 3, 4], id="smaller"),
pytest.param([1, 2, 3, 4, 5, 6], id="larger"),
],
)
@pytest.mark.parametrize(
"key",
[
pytest.param("list_test", id="new_column"),
pytest.param("id", id="existing_column"),
],
)
def test_setitem_diff_size_list(list_input, key):
gdf = cudf.datasets.randomdata(5)
with pytest.raises(
ValueError, match=("All columns must be of equal length")
):
gdf[key] = list_input
@pytest.mark.parametrize(
"series_input",
[
pytest.param(cudf.Series([1, 2, 3, 4]), id="smaller_cudf"),
pytest.param(cudf.Series([1, 2, 3, 4, 5, 6]), id="larger_cudf"),
pytest.param(cudf.Series([1, 2, 3], index=[4, 5, 6]), id="index_cudf"),
pytest.param(pd.Series([1, 2, 3, 4]), id="smaller_pandas"),
pytest.param(pd.Series([1, 2, 3, 4, 5, 6]), id="larger_pandas"),
pytest.param(pd.Series([1, 2, 3], index=[4, 5, 6]), id="index_pandas"),
],
)
@pytest.mark.parametrize(
"key",
[
pytest.param("list_test", id="new_column"),
pytest.param("id", id="existing_column"),
],
)
def test_setitem_diff_size_series(series_input, key):
gdf = cudf.datasets.randomdata(5)
pdf = gdf.to_pandas()
pandas_input = series_input
if isinstance(pandas_input, cudf.Series):
pandas_input = pandas_input.to_pandas()
expect = pdf
expect[key] = pandas_input
got = gdf
got[key] = series_input
    # Pandas uses NaN and typecasts to float64 if there are missing values
    # after alignment, so typecast both to float64 for the equality comparison
expect = expect.astype("float64")
got = got.astype("float64")
assert_eq(expect, got)
def test_tupleize_cols_False_set():
pdf = pd.DataFrame()
gdf = cudf.DataFrame()
pdf[("a", "b")] = [1]
gdf[("a", "b")] = [1]
assert_eq(pdf, gdf)
assert_eq(pdf.columns, gdf.columns)
def test_init_multiindex_from_dict():
pdf = pd.DataFrame({("a", "b"): [1]})
gdf = cudf.DataFrame({("a", "b"): [1]})
assert_eq(pdf, gdf)
assert_eq(pdf.columns, gdf.columns)
def test_change_column_dtype_in_empty():
pdf = pd.DataFrame({"a": [], "b": []})
gdf = cudf.from_pandas(pdf)
assert_eq(pdf, gdf)
pdf["b"] = pdf["b"].astype("int64")
gdf["b"] = gdf["b"].astype("int64")
assert_eq(pdf, gdf)
def test_dataframe_from_table_empty_index():
df = cudf.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
odict = df._data
tbl = cudf._lib.table.Table(odict)
result = cudf.DataFrame._from_table(tbl) # noqa: F841
@pytest.mark.parametrize("dtype", ["int64", "str"])
def test_dataframe_from_dictionary_series_same_name_index(dtype):
pd_idx1 = pd.Index([1, 2, 0], name="test_index").astype(dtype)
pd_idx2 = pd.Index([2, 0, 1], name="test_index").astype(dtype)
pd_series1 = pd.Series([1, 2, 3], index=pd_idx1)
pd_series2 = pd.Series([1, 2, 3], index=pd_idx2)
gd_idx1 = cudf.from_pandas(pd_idx1)
gd_idx2 = cudf.from_pandas(pd_idx2)
gd_series1 = cudf.Series([1, 2, 3], index=gd_idx1)
gd_series2 = cudf.Series([1, 2, 3], index=gd_idx2)
expect = pd.DataFrame({"a": pd_series1, "b": pd_series2})
got = cudf.DataFrame({"a": gd_series1, "b": gd_series2})
if dtype == "str":
# Pandas actually loses its index name erroneously here...
expect.index.name = "test_index"
assert_eq(expect, got)
assert expect.index.names == got.index.names
@pytest.mark.parametrize(
"arg", [slice(2, 8, 3), slice(1, 20, 4), slice(-2, -6, -2)]
)
def test_dataframe_strided_slice(arg):
mul = pd.DataFrame(
{
"Index": [1, 2, 3, 4, 5, 6, 7, 8, 9],
"AlphaIndex": ["a", "b", "c", "d", "e", "f", "g", "h", "i"],
}
)
pdf = pd.DataFrame(
{"Val": [10, 9, 8, 7, 6, 5, 4, 3, 2]},
index=pd.MultiIndex.from_frame(mul),
)
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf[arg]
got = gdf[arg]
assert_eq(expect, got)
@pytest.mark.parametrize(
"data,condition,other,error",
[
(pd.Series(range(5)), pd.Series(range(5)) > 0, None, None),
(pd.Series(range(5)), pd.Series(range(5)) > 1, None, None),
(pd.Series(range(5)), pd.Series(range(5)) > 1, 10, None),
(
pd.Series(range(5)),
pd.Series(range(5)) > 1,
pd.Series(range(5, 10)),
None,
),
(
pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"]),
(
pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"])
% 3
)
== 0,
-pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"]),
None,
),
(
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}),
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}) == 4,
None,
None,
),
(
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}),
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}) != 4,
None,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[True, True, True],
None,
ValueError,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[True, True, True, False],
None,
ValueError,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[[True, True, True, False], [True, True, True, False]],
None,
ValueError,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[[True, True], [False, True], [True, False], [False, True]],
None,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
cuda.to_device(
np.array(
[[True, True], [False, True], [True, False], [False, True]]
)
),
None,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
cupy.array(
[[True, True], [False, True], [True, False], [False, True]]
),
17,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[[True, True], [False, True], [True, False], [False, True]],
17,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[
[True, True, False, True],
[True, True, False, True],
[True, True, False, True],
[True, True, False, True],
],
None,
ValueError,
),
(
pd.Series([1, 2, np.nan]),
pd.Series([1, 2, np.nan]) == 4,
None,
None,
),
(
pd.Series([1, 2, np.nan]),
pd.Series([1, 2, np.nan]) != 4,
None,
None,
),
(
pd.Series([4, np.nan, 6]),
pd.Series([4, np.nan, 6]) == 4,
None,
None,
),
(
pd.Series([4, np.nan, 6]),
pd.Series([4, np.nan, 6]) != 4,
None,
None,
),
(
pd.Series([4, np.nan, 6], dtype="category"),
pd.Series([4, np.nan, 6], dtype="category") != 4,
None,
None,
),
(
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category"),
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category") == "b",
None,
None,
),
(
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category"),
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category") == "b",
"s",
None,
),
(
pd.Series([1, 2, 3, 2, 5]),
pd.Series([1, 2, 3, 2, 5]) == 2,
pd.DataFrame(
{
"a": pd.Series([1, 2, 3, 2, 5]),
"b": pd.Series([1, 2, 3, 2, 5]),
}
),
NotImplementedError,
),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_df_sr_mask_where(data, condition, other, error, inplace):
ps_where = data
gs_where = cudf.from_pandas(data)
ps_mask = ps_where.copy(deep=True)
gs_mask = gs_where.copy(deep=True)
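    # Normalize inputs for both paths: device arrays (cupy/numba) become host
    # numpy for the pandas reference, while pandas objects are mirrored into
    # cudf for the GPU side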
if hasattr(condition, "__cuda_array_interface__"):
if type(condition).__module__.split(".")[0] == "cupy":
ps_condition = cupy.asnumpy(condition)
else:
ps_condition = np.array(condition).astype("bool")
else:
ps_condition = condition
if type(condition).__module__.split(".")[0] == "pandas":
gs_condition = cudf.from_pandas(condition)
else:
gs_condition = condition
ps_other = other
if type(other).__module__.split(".")[0] == "pandas":
gs_other = cudf.from_pandas(other)
else:
gs_other = other
if error is None:
expect_where = ps_where.where(
ps_condition, other=ps_other, inplace=inplace
)
got_where = gs_where.where(
gs_condition, other=gs_other, inplace=inplace
)
expect_mask = ps_mask.mask(
ps_condition, other=ps_other, inplace=inplace
)
got_mask = gs_mask.mask(gs_condition, other=gs_other, inplace=inplace)
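        # where/mask with inplace=True return None, so compare the mutated
        # objects instead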
if inplace:
expect_where = ps_where
got_where = gs_where
expect_mask = ps_mask
got_mask = gs_mask
if pd.api.types.is_categorical_dtype(expect_where):
np.testing.assert_array_equal(
expect_where.cat.codes,
got_where.cat.codes.astype(expect_where.cat.codes.dtype)
.fillna(-1)
.to_array(),
)
assert_eq(expect_where.cat.categories, got_where.cat.categories)
np.testing.assert_array_equal(
expect_mask.cat.codes,
got_mask.cat.codes.astype(expect_mask.cat.codes.dtype)
.fillna(-1)
.to_array(),
)
assert_eq(expect_mask.cat.categories, got_mask.cat.categories)
else:
assert_eq(
expect_where.fillna(-1),
got_where.fillna(-1),
check_dtype=False,
)
assert_eq(
expect_mask.fillna(-1), got_mask.fillna(-1), check_dtype=False
)
else:
assert_exceptions_equal(
lfunc=ps_where.where,
rfunc=gs_where.where,
lfunc_args_and_kwargs=(
[ps_condition],
{"other": ps_other, "inplace": inplace},
),
rfunc_args_and_kwargs=(
[gs_condition],
{"other": gs_other, "inplace": inplace},
),
compare_error_message=False
if error is NotImplementedError
else True,
)
assert_exceptions_equal(
lfunc=ps_mask.mask,
rfunc=gs_mask.mask,
lfunc_args_and_kwargs=(
[ps_condition],
{"other": ps_other, "inplace": inplace},
),
rfunc_args_and_kwargs=(
[gs_condition],
{"other": gs_other, "inplace": inplace},
),
compare_error_message=False,
)
@pytest.mark.parametrize(
"data,condition,other,has_cat",
[
(
pd.DataFrame(
{
"a": pd.Series(["a", "a", "b", "c", "a", "d", "d", "a"]),
"b": pd.Series(["o", "p", "q", "e", "p", "p", "a", "a"]),
}
),
pd.DataFrame(
{
"a": pd.Series(["a", "a", "b", "c", "a", "d", "d", "a"]),
"b": pd.Series(["o", "p", "q", "e", "p", "p", "a", "a"]),
}
)
!= "a",
None,
None,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
!= "a",
None,
True,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
== "a",
None,
True,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
!= "a",
"a",
True,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
== "a",
"a",
True,
),
],
)
def test_df_string_cat_types_mask_where(data, condition, other, has_cat):
ps = data
gs = cudf.from_pandas(data)
ps_condition = condition
if type(condition).__module__.split(".")[0] == "pandas":
gs_condition = cudf.from_pandas(condition)
else:
gs_condition = condition
ps_other = other
if type(other).__module__.split(".")[0] == "pandas":
gs_other = cudf.from_pandas(other)
else:
gs_other = other
expect_where = ps.where(ps_condition, other=ps_other)
got_where = gs.where(gs_condition, other=gs_other)
expect_mask = ps.mask(ps_condition, other=ps_other)
got_mask = gs.mask(gs_condition, other=gs_other)
if has_cat is None:
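        # Plain string columns: fill nulls with -1 on both sides and cast the
        # pandas result to str so the host and device frames compare equal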
assert_eq(
expect_where.fillna(-1).astype("str"),
got_where.fillna(-1),
check_dtype=False,
)
assert_eq(
expect_mask.fillna(-1).astype("str"),
got_mask.fillna(-1),
check_dtype=False,
)
else:
assert_eq(expect_where, got_where, check_dtype=False)
assert_eq(expect_mask, got_mask, check_dtype=False)
@pytest.mark.parametrize(
"data,expected_upcast_type,error",
[
(
pd.Series([random.random() for _ in range(10)], dtype="float32"),
np.dtype("float32"),
None,
),
(
pd.Series([random.random() for _ in range(10)], dtype="float16"),
np.dtype("float32"),
None,
),
(
pd.Series([random.random() for _ in range(10)], dtype="float64"),
np.dtype("float64"),
None,
),
(
pd.Series([random.random() for _ in range(10)], dtype="float128"),
None,
NotImplementedError,
),
],
)
def test_from_pandas_unsupported_types(data, expected_upcast_type, error):
pdf = pd.DataFrame({"one_col": data})
if error == NotImplementedError:
with pytest.raises(error):
cudf.from_pandas(data)
with pytest.raises(error):
cudf.Series(data)
with pytest.raises(error):
cudf.from_pandas(pdf)
with pytest.raises(error):
cudf.DataFrame(pdf)
else:
df = cudf.from_pandas(data)
assert_eq(data, df, check_dtype=False)
assert df.dtype == expected_upcast_type
df = cudf.Series(data)
assert_eq(data, df, check_dtype=False)
assert df.dtype == expected_upcast_type
df = cudf.from_pandas(pdf)
assert_eq(pdf, df, check_dtype=False)
assert df["one_col"].dtype == expected_upcast_type
df = cudf.DataFrame(pdf)
assert_eq(pdf, df, check_dtype=False)
assert df["one_col"].dtype == expected_upcast_type
@pytest.mark.parametrize("nan_as_null", [True, False])
@pytest.mark.parametrize("index", [None, "a", ["a", "b"]])
def test_from_pandas_nan_as_null(nan_as_null, index):
data = [np.nan, 2.0, 3.0]
if index is None:
pdf = pd.DataFrame({"a": data, "b": data})
expected = cudf.DataFrame(
{
"a": column.as_column(data, nan_as_null=nan_as_null),
"b": column.as_column(data, nan_as_null=nan_as_null),
}
)
else:
pdf = pd.DataFrame({"a": data, "b": data}).set_index(index)
expected = cudf.DataFrame(
{
"a": column.as_column(data, nan_as_null=nan_as_null),
"b": column.as_column(data, nan_as_null=nan_as_null),
}
)
expected = expected.set_index(index)
got = cudf.from_pandas(pdf, nan_as_null=nan_as_null)
assert_eq(expected, got)
@pytest.mark.parametrize("nan_as_null", [True, False])
def test_from_pandas_for_series_nan_as_null(nan_as_null):
data = [np.nan, 2.0, 3.0]
psr = pd.Series(data)
expected = cudf.Series(column.as_column(data, nan_as_null=nan_as_null))
got = cudf.from_pandas(psr, nan_as_null=nan_as_null)
assert_eq(expected, got)
@pytest.mark.parametrize("copy", [True, False])
def test_df_series_dataframe_astype_copy(copy):
gdf = cudf.DataFrame({"col1": [1, 2], "col2": [3, 4]})
pdf = gdf.to_pandas()
assert_eq(
gdf.astype(dtype="float", copy=copy),
pdf.astype(dtype="float", copy=copy),
)
assert_eq(gdf, pdf)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
assert_eq(
gsr.astype(dtype="float", copy=copy),
psr.astype(dtype="float", copy=copy),
)
assert_eq(gsr, psr)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
actual = gsr.astype(dtype="int64", copy=copy)
expected = psr.astype(dtype="int64", copy=copy)
assert_eq(expected, actual)
assert_eq(gsr, psr)
actual[0] = 3
expected[0] = 3
assert_eq(gsr, psr)
@pytest.mark.parametrize("copy", [True, False])
def test_df_series_dataframe_astype_dtype_dict(copy):
gdf = cudf.DataFrame({"col1": [1, 2], "col2": [3, 4]})
pdf = gdf.to_pandas()
assert_eq(
gdf.astype(dtype={"col1": "float"}, copy=copy),
pdf.astype(dtype={"col1": "float"}, copy=copy),
)
assert_eq(gdf, pdf)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
assert_eq(
gsr.astype(dtype={None: "float"}, copy=copy),
psr.astype(dtype={None: "float"}, copy=copy),
)
assert_eq(gsr, psr)
assert_exceptions_equal(
lfunc=psr.astype,
rfunc=gsr.astype,
lfunc_args_and_kwargs=([], {"dtype": {"a": "float"}, "copy": copy}),
rfunc_args_and_kwargs=([], {"dtype": {"a": "float"}, "copy": copy}),
)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
actual = gsr.astype({None: "int64"}, copy=copy)
expected = psr.astype({None: "int64"}, copy=copy)
assert_eq(expected, actual)
assert_eq(gsr, psr)
actual[0] = 3
expected[0] = 3
assert_eq(gsr, psr)
@pytest.mark.parametrize(
"data,columns",
[
([1, 2, 3, 100, 112, 35464], ["a"]),
(range(100), None),
([], None),
((-10, 21, 32, 32, 1, 2, 3), ["p"]),
((), None),
([[1, 2, 3], [1, 2, 3]], ["col1", "col2", "col3"]),
([range(100), range(100)], ["range" + str(i) for i in range(100)]),
(((1, 2, 3), (1, 2, 3)), ["tuple0", "tuple1", "tuple2"]),
([[1, 2, 3]], ["list col1", "list col2", "list col3"]),
([range(100)], ["range" + str(i) for i in range(100)]),
(((1, 2, 3),), ["k1", "k2", "k3"]),
],
)
def test_dataframe_init_1d_list(data, columns):
expect = pd.DataFrame(data, columns=columns)
actual = cudf.DataFrame(data, columns=columns)
assert_eq(
expect, actual, check_index_type=False if len(data) == 0 else True
)
expect = pd.DataFrame(data, columns=None)
actual = cudf.DataFrame(data, columns=None)
assert_eq(
expect, actual, check_index_type=False if len(data) == 0 else True
)
@pytest.mark.parametrize(
"data,cols,index",
[
(
np.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "b"],
["a", "b", "c", "d"],
),
(
np.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "b"],
[0, 20, 30, 10],
),
(
np.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "b"],
[0, 1, 2, 3],
),
(np.array([11, 123, -2342, 232]), ["a"], [1, 2, 11, 12]),
(np.array([11, 123, -2342, 232]), ["a"], ["khsdjk", "a", "z", "kk"]),
(
cupy.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "z"],
["a", "z", "a", "z"],
),
(cupy.array([11, 123, -2342, 232]), ["z"], [0, 1, 1, 0]),
(cupy.array([11, 123, -2342, 232]), ["z"], [1, 2, 3, 4]),
(cupy.array([11, 123, -2342, 232]), ["z"], ["a", "z", "d", "e"]),
(np.random.randn(2, 4), ["a", "b", "c", "d"], ["a", "b"]),
(np.random.randn(2, 4), ["a", "b", "c", "d"], [1, 0]),
(cupy.random.randn(2, 4), ["a", "b", "c", "d"], ["a", "b"]),
(cupy.random.randn(2, 4), ["a", "b", "c", "d"], [1, 0]),
],
)
def test_dataframe_init_from_arrays_cols(data, cols, index):
gd_data = data
if isinstance(data, cupy.core.ndarray):
# pandas can't handle cupy arrays in general
pd_data = data.get()
# additional test for building DataFrame with gpu array whose
# cuda array interface has no `descr` attribute
numba_data = cuda.as_cuda_array(data)
else:
pd_data = data
numba_data = None
# verify with columns & index
pdf = pd.DataFrame(pd_data, columns=cols, index=index)
gdf = cudf.DataFrame(gd_data, columns=cols, index=index)
assert_eq(pdf, gdf, check_dtype=False)
# verify with columns
pdf = pd.DataFrame(pd_data, columns=cols)
gdf = cudf.DataFrame(gd_data, columns=cols)
assert_eq(pdf, gdf, check_dtype=False)
pdf = pd.DataFrame(pd_data)
gdf = cudf.DataFrame(gd_data)
assert_eq(pdf, gdf, check_dtype=False)
if numba_data is not None:
gdf = cudf.DataFrame(numba_data)
assert_eq(pdf, gdf, check_dtype=False)
@pytest.mark.parametrize(
"col_data",
[
range(5),
["a", "b", "x", "y", "z"],
[1.0, 0.213, 0.34332],
["a"],
[1],
[0.2323],
[],
],
)
@pytest.mark.parametrize(
"assign_val",
[
1,
2,
np.array(2),
cupy.array(2),
0.32324,
np.array(0.34248),
cupy.array(0.34248),
"abc",
np.array("abc", dtype="object"),
np.array("abc", dtype="str"),
np.array("abc"),
None,
],
)
def test_dataframe_assign_scalar(col_data, assign_val):
pdf = pd.DataFrame({"a": col_data})
gdf = cudf.DataFrame({"a": col_data})
pdf["b"] = (
cupy.asnumpy(assign_val)
if isinstance(assign_val, cupy.ndarray)
else assign_val
)
gdf["b"] = assign_val
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"col_data",
[
1,
2,
np.array(2),
cupy.array(2),
0.32324,
np.array(0.34248),
cupy.array(0.34248),
"abc",
np.array("abc", dtype="object"),
np.array("abc", dtype="str"),
np.array("abc"),
None,
],
)
@pytest.mark.parametrize(
"assign_val",
[
1,
2,
np.array(2),
cupy.array(2),
0.32324,
np.array(0.34248),
cupy.array(0.34248),
"abc",
np.array("abc", dtype="object"),
np.array("abc", dtype="str"),
np.array("abc"),
None,
],
)
def test_dataframe_assign_scalar_with_scalar_cols(col_data, assign_val):
pdf = pd.DataFrame(
{
"a": cupy.asnumpy(col_data)
if isinstance(col_data, cupy.ndarray)
else col_data
},
index=["dummy_mandatory_index"],
)
gdf = cudf.DataFrame({"a": col_data}, index=["dummy_mandatory_index"])
pdf["b"] = (
cupy.asnumpy(assign_val)
if isinstance(assign_val, cupy.ndarray)
else assign_val
)
gdf["b"] = assign_val
assert_eq(pdf, gdf)
def test_dataframe_info_basic():
buffer = io.StringIO()
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
StringIndex: 10 entries, a to 1111
Data columns (total 10 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 0 10 non-null float64
1 1 10 non-null float64
2 2 10 non-null float64
3 3 10 non-null float64
4 4 10 non-null float64
5 5 10 non-null float64
6 6 10 non-null float64
7 7 10 non-null float64
8 8 10 non-null float64
9 9 10 non-null float64
dtypes: float64(10)
memory usage: 859.0+ bytes
"""
)
df = pd.DataFrame(
np.random.randn(10, 10),
index=["a", "2", "3", "4", "5", "6", "7", "8", "100", "1111"],
)
cudf.from_pandas(df).info(buf=buffer, verbose=True)
s = buffer.getvalue()
assert str_cmp == s
def test_dataframe_info_verbose_mem_usage():
buffer = io.StringIO()
df = pd.DataFrame({"a": [1, 2, 3], "b": ["safdas", "assa", "asdasd"]})
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 3 entries, 0 to 2
Data columns (total 2 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 a 3 non-null int64
1 b 3 non-null object
dtypes: int64(1), object(1)
memory usage: 56.0+ bytes
"""
)
cudf.from_pandas(df).info(buf=buffer, verbose=True)
s = buffer.getvalue()
assert str_cmp == s
buffer.truncate(0)
buffer.seek(0)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 3 entries, 0 to 2
Columns: 2 entries, a to b
dtypes: int64(1), object(1)
memory usage: 56.0+ bytes
"""
)
cudf.from_pandas(df).info(buf=buffer, verbose=False)
s = buffer.getvalue()
assert str_cmp == s
buffer.truncate(0)
buffer.seek(0)
df = pd.DataFrame(
{"a": [1, 2, 3], "b": ["safdas", "assa", "asdasd"]},
index=["sdfdsf", "sdfsdfds", "dsfdf"],
)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
StringIndex: 3 entries, sdfdsf to dsfdf
Data columns (total 2 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 a 3 non-null int64
1 b 3 non-null object
dtypes: int64(1), object(1)
memory usage: 91.0 bytes
"""
)
cudf.from_pandas(df).info(buf=buffer, verbose=True, memory_usage="deep")
s = buffer.getvalue()
assert str_cmp == s
buffer.truncate(0)
buffer.seek(0)
int_values = [1, 2, 3, 4, 5]
text_values = ["alpha", "beta", "gamma", "delta", "epsilon"]
float_values = [0.0, 0.25, 0.5, 0.75, 1.0]
df = cudf.DataFrame(
{
"int_col": int_values,
"text_col": text_values,
"float_col": float_values,
}
)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Data columns (total 3 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 int_col 5 non-null int64
1 text_col 5 non-null object
2 float_col 5 non-null float64
dtypes: float64(1), int64(1), object(1)
memory usage: 130.0 bytes
"""
)
df.info(buf=buffer, verbose=True, memory_usage="deep")
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
def test_dataframe_info_null_counts():
int_values = [1, 2, 3, 4, 5]
text_values = ["alpha", "beta", "gamma", "delta", "epsilon"]
float_values = [0.0, 0.25, 0.5, 0.75, 1.0]
df = cudf.DataFrame(
{
"int_col": int_values,
"text_col": text_values,
"float_col": float_values,
}
)
buffer = io.StringIO()
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Data columns (total 3 columns):
# Column Dtype
--- ------ -----
0 int_col int64
1 text_col object
2 float_col float64
dtypes: float64(1), int64(1), object(1)
memory usage: 130.0+ bytes
"""
)
df.info(buf=buffer, verbose=True, null_counts=False)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df.info(buf=buffer, verbose=True, max_cols=0)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df = cudf.DataFrame()
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 0 entries
Empty DataFrame"""
)
df.info(buf=buffer, verbose=True)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df = cudf.DataFrame(
{
"a": [1, 2, 3, None, 10, 11, 12, None],
"b": ["a", "b", "c", "sd", "sdf", "sd", None, None],
}
)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 8 entries, 0 to 7
Data columns (total 2 columns):
# Column Dtype
--- ------ -----
0 a int64
1 b object
dtypes: int64(1), object(1)
memory usage: 238.0+ bytes
"""
)
pd.options.display.max_info_rows = 2
df.info(buf=buffer, max_cols=2, null_counts=None)
pd.reset_option("display.max_info_rows")
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 8 entries, 0 to 7
Data columns (total 2 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 a 6 non-null int64
1 b 6 non-null object
dtypes: int64(1), object(1)
memory usage: 238.0+ bytes
"""
)
df.info(buf=buffer, max_cols=2, null_counts=None)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df.info(buf=buffer, null_counts=True)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
@pytest.mark.parametrize(
"data1",
[
[1, 2, 3, 4, 5, 6, 7],
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0],
[
1.9876543,
2.9876654,
3.9876543,
4.1234587,
5.23,
6.88918237,
7.00001,
],
[
-1.9876543,
-2.9876654,
-3.9876543,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
0.112121,
-21.1212,
],
[
-1.987654321,
-2.987654321,
-3.987654321,
-0.1221,
-2.1221,
-0.112121,
21.1212,
],
],
)
@pytest.mark.parametrize(
"data2",
[
[1, 2, 3, 4, 5, 6, 7],
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0],
[
1.9876543,
2.9876654,
3.9876543,
4.1234587,
5.23,
6.88918237,
7.00001,
],
[
-1.9876543,
-2.9876654,
-3.9876543,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
0.112121,
-21.1212,
],
[
-1.987654321,
-2.987654321,
-3.987654321,
-0.1221,
-2.1221,
-0.112121,
21.1212,
],
],
)
@pytest.mark.parametrize("rtol", [0, 0.01, 1e-05, 1e-08, 5e-1, 50.12])
@pytest.mark.parametrize("atol", [0, 0.01, 1e-05, 1e-08, 50.12])
def test_cudf_isclose(data1, data2, rtol, atol):
array1 = cupy.array(data1)
array2 = cupy.array(data2)
expected = cudf.Series(cupy.isclose(array1, array2, rtol=rtol, atol=atol))
actual = cudf.isclose(
cudf.Series(data1), cudf.Series(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
actual = cudf.isclose(data1, data2, rtol=rtol, atol=atol)
assert_eq(expected, actual)
actual = cudf.isclose(
cupy.array(data1), cupy.array(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
actual = cudf.isclose(
np.array(data1), np.array(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
actual = cudf.isclose(
pd.Series(data1), pd.Series(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
@pytest.mark.parametrize(
"data1",
[
[
-1.9876543,
-2.9876654,
np.nan,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
np.nan,
-21.1212,
],
],
)
@pytest.mark.parametrize(
"data2",
[
[
-1.9876543,
-2.9876654,
-3.9876543,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
0.112121,
-21.1212,
],
[
-1.987654321,
-2.987654321,
-3.987654321,
np.nan,
np.nan,
np.nan,
21.1212,
],
],
)
@pytest.mark.parametrize("equal_nan", [True, False])
def test_cudf_isclose_nulls(data1, data2, equal_nan):
array1 = cupy.array(data1)
array2 = cupy.array(data2)
expected = cudf.Series(cupy.isclose(array1, array2, equal_nan=equal_nan))
actual = cudf.isclose(
cudf.Series(data1), cudf.Series(data2), equal_nan=equal_nan
)
assert_eq(expected, actual, check_dtype=False)
actual = cudf.isclose(data1, data2, equal_nan=equal_nan)
assert_eq(expected, actual, check_dtype=False)
def test_cudf_isclose_different_index():
s1 = cudf.Series(
[-1.9876543, -2.9876654, -3.9876543, -4.1234587, -5.23, -7.00001],
index=[0, 1, 2, 3, 4, 5],
)
s2 = cudf.Series(
[-1.9876543, -2.9876654, -7.00001, -4.1234587, -5.23, -3.9876543],
index=[0, 1, 5, 3, 4, 2],
)
expected = cudf.Series([True] * 6, index=s1.index)
assert_eq(expected, cudf.isclose(s1, s2))
s1 = cudf.Series(
[-1.9876543, -2.9876654, -3.9876543, -4.1234587, -5.23, -7.00001],
index=[0, 1, 2, 3, 4, 5],
)
s2 = cudf.Series(
[-1.9876543, -2.9876654, -7.00001, -4.1234587, -5.23, -3.9876543],
index=[0, 1, 5, 10, 4, 2],
)
expected = cudf.Series(
[True, True, True, False, True, True], index=s1.index
)
assert_eq(expected, cudf.isclose(s1, s2))
s1 = cudf.Series(
[-1.9876543, -2.9876654, -3.9876543, -4.1234587, -5.23, -7.00001],
index=[100, 1, 2, 3, 4, 5],
)
s2 = cudf.Series(
[-1.9876543, -2.9876654, -7.00001, -4.1234587, -5.23, -3.9876543],
index=[0, 1, 100, 10, 4, 2],
)
expected = cudf.Series(
[False, True, True, False, True, False], index=s1.index
)
assert_eq(expected, cudf.isclose(s1, s2))
def test_dataframe_to_dict_error():
df = cudf.DataFrame({"a": [1, 2, 3], "b": [9, 5, 3]})
with pytest.raises(
TypeError,
match=re.escape(
r"cuDF does not support conversion to host memory "
r"via `to_dict()` method. Consider using "
r"`.to_pandas().to_dict()` to construct a Python dictionary."
),
):
df.to_dict()
with pytest.raises(
TypeError,
match=re.escape(
r"cuDF does not support conversion to host memory "
r"via `to_dict()` method. Consider using "
r"`.to_pandas().to_dict()` to construct a Python dictionary."
),
):
df["a"].to_dict()
@pytest.mark.parametrize(
"df",
[
pd.DataFrame({"a": [1, 2, 3, 4, 5, 10, 11, 12, 33, 55, 19]}),
pd.DataFrame(
{
"one": [1, 2, 3, 4, 5, 10],
"two": ["abc", "def", "ghi", "xyz", "pqr", "abc"],
}
),
pd.DataFrame(
{
"one": [1, 2, 3, 4, 5, 10],
"two": ["abc", "def", "ghi", "xyz", "pqr", "abc"],
},
index=[10, 20, 30, 40, 50, 60],
),
pd.DataFrame(
{
"one": [1, 2, 3, 4, 5, 10],
"two": ["abc", "def", "ghi", "xyz", "pqr", "abc"],
},
index=["a", "b", "c", "d", "e", "f"],
),
pd.DataFrame(index=["a", "b", "c", "d", "e", "f"]),
pd.DataFrame(columns=["a", "b", "c", "d", "e", "f"]),
pd.DataFrame(index=[10, 11, 12]),
pd.DataFrame(columns=[10, 11, 12]),
pd.DataFrame(),
pd.DataFrame({"one": [], "two": []}),
pd.DataFrame({2: [], 1: []}),
pd.DataFrame(
{
0: [1, 2, 3, 4, 5, 10],
1: ["abc", "def", "ghi", "xyz", "pqr", "abc"],
100: ["a", "b", "b", "x", "z", "a"],
},
index=[10, 20, 30, 40, 50, 60],
),
],
)
def test_dataframe_keys(df):
gdf = cudf.from_pandas(df)
assert_eq(df.keys(), gdf.keys())
@pytest.mark.parametrize(
"ps",
[
pd.Series([1, 2, 3, 4, 5, 10, 11, 12, 33, 55, 19]),
pd.Series(["abc", "def", "ghi", "xyz", "pqr", "abc"]),
pd.Series(
[1, 2, 3, 4, 5, 10],
index=["abc", "def", "ghi", "xyz", "pqr", "abc"],
),
pd.Series(
["abc", "def", "ghi", "xyz", "pqr", "abc"],
index=[1, 2, 3, 4, 5, 10],
),
pd.Series(index=["a", "b", "c", "d", "e", "f"], dtype="float64"),
pd.Series(index=[10, 11, 12], dtype="float64"),
pd.Series(dtype="float64"),
pd.Series([], dtype="float64"),
],
)
def test_series_keys(ps):
gds = cudf.from_pandas(ps)
if len(ps) == 0 and not isinstance(ps.index, pd.RangeIndex):
assert_eq(ps.keys().astype("float64"), gds.keys())
else:
assert_eq(ps.keys(), gds.keys())
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame(index=[10, 20, 30]),
pd.DataFrame({"first_col": [], "second_col": [], "third_col": []}),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB")),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[10, 20]),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[7, 8]),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[7, 20, 11, 9],
),
pd.DataFrame({"l": [10]}),
pd.DataFrame({"l": [10]}, index=[100]),
pd.DataFrame({"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]}),
pd.DataFrame(
{"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]},
index=[100, 200, 300, 400, 500, 0],
),
],
)
@pytest.mark.parametrize(
"other",
[
pd.DataFrame([[5, 6], [7, 8]], columns=list("AB")),
pd.DataFrame([[5, 6], [7, 8]], columns=list("BD")),
pd.DataFrame([[5, 6], [7, 8]], columns=list("DE")),
pd.DataFrame(),
pd.DataFrame(
{"c": [10, 11, 22, 33, 44, 100]}, index=[7, 8, 9, 10, 11, 20]
),
pd.DataFrame({"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]}),
pd.DataFrame({"l": [10]}),
pd.DataFrame({"l": [10]}, index=[200]),
pd.DataFrame([]),
pd.DataFrame({"first_col": [], "second_col": [], "third_col": []}),
pd.DataFrame([], index=[100]),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
],
)
@pytest.mark.parametrize("sort", [False, True])
@pytest.mark.parametrize("ignore_index", [True, False])
def test_dataframe_append_dataframe(df, other, sort, ignore_index):
pdf = df
other_pd = other
gdf = cudf.from_pandas(df)
other_gd = cudf.from_pandas(other)
expected = pdf.append(other_pd, sort=sort, ignore_index=ignore_index)
actual = gdf.append(other_gd, sort=sort, ignore_index=ignore_index)
if expected.shape != df.shape:
assert_eq(expected.fillna(-1), actual.fillna(-1), check_dtype=False)
else:
assert_eq(
expected, actual, check_index_type=False if gdf.empty else True
)
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame(index=[10, 20, 30]),
pd.DataFrame({12: [], 22: []}),
pd.DataFrame([[1, 2], [3, 4]], columns=[10, 20]),
pd.DataFrame([[1, 2], [3, 4]], columns=[0, 1], index=[10, 20]),
pd.DataFrame([[1, 2], [3, 4]], columns=[1, 0], index=[7, 8]),
pd.DataFrame(
{
23: [315.3324, 3243.32432, 3232.332, -100.32],
33: [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
0: [315.3324, 3243.32432, 3232.332, -100.32],
1: [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[7, 20, 11, 9],
),
],
)
@pytest.mark.parametrize(
"other",
[
pd.Series([10, 11, 23, 234, 13]),
pytest.param(
pd.Series([10, 11, 23, 234, 13], index=[11, 12, 13, 44, 33]),
marks=pytest.mark.xfail(
reason="pandas bug: "
"https://github.com/pandas-dev/pandas/issues/35092"
),
),
{1: 1},
{0: 10, 1: 100, 2: 102},
],
)
@pytest.mark.parametrize("sort", [False, True])
def test_dataframe_append_series_dict(df, other, sort):
pdf = df
other_pd = other
gdf = cudf.from_pandas(df)
if isinstance(other, pd.Series):
other_gd = cudf.from_pandas(other)
else:
other_gd = other
expected = pdf.append(other_pd, ignore_index=True, sort=sort)
actual = gdf.append(other_gd, ignore_index=True, sort=sort)
if expected.shape != df.shape:
assert_eq(expected.fillna(-1), actual.fillna(-1), check_dtype=False)
else:
assert_eq(
expected, actual, check_index_type=False if gdf.empty else True
)
def test_dataframe_append_series_mixed_index():
df = cudf.DataFrame({"first": [], "d": []})
sr = cudf.Series([1, 2, 3, 4])
with pytest.raises(
TypeError,
match=re.escape(
"cudf does not support mixed types, please type-cast "
"the column index of dataframe and index of series "
"to same dtypes."
),
):
df.append(sr, ignore_index=True)
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame(index=[10, 20, 30]),
pd.DataFrame({"first_col": [], "second_col": [], "third_col": []}),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB")),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[10, 20]),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[7, 8]),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[7, 20, 11, 9],
),
pd.DataFrame({"l": [10]}),
pd.DataFrame({"l": [10]}, index=[100]),
pd.DataFrame({"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]}),
pd.DataFrame(
{"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]},
index=[100, 200, 300, 400, 500, 0],
),
],
)
@pytest.mark.parametrize(
"other",
[
[pd.DataFrame([[5, 6], [7, 8]], columns=list("AB"))],
[
pd.DataFrame([[5, 6], [7, 8]], columns=list("AB")),
pd.DataFrame([[5, 6], [7, 8]], columns=list("BD")),
pd.DataFrame([[5, 6], [7, 8]], columns=list("DE")),
],
[pd.DataFrame(), pd.DataFrame(), pd.DataFrame(), pd.DataFrame()],
[
pd.DataFrame(
{"c": [10, 11, 22, 33, 44, 100]}, index=[7, 8, 9, 10, 11, 20]
),
pd.DataFrame(),
pd.DataFrame(),
pd.DataFrame([[5, 6], [7, 8]], columns=list("AB")),
],
[
pd.DataFrame({"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]}),
pd.DataFrame({"l": [10]}),
pd.DataFrame({"l": [10]}, index=[200]),
],
[pd.DataFrame([]), pd.DataFrame([], index=[100])],
[
pd.DataFrame([]),
pd.DataFrame([], index=[100]),
pd.DataFrame({"first_col": [], "second_col": [], "third_col": []}),
],
[
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
],
[
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
],
[
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
],
[
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
pd.DataFrame({"first_col": [], "second_col": [], "third_col": []}),
],
],
)
@pytest.mark.parametrize("sort", [False, True])
@pytest.mark.parametrize("ignore_index", [True, False])
def test_dataframe_append_dataframe_lists(df, other, sort, ignore_index):
pdf = df
other_pd = other
gdf = cudf.from_pandas(df)
other_gd = [
cudf.from_pandas(o) if isinstance(o, pd.DataFrame) else o
for o in other
]
expected = pdf.append(other_pd, sort=sort, ignore_index=ignore_index)
actual = gdf.append(other_gd, sort=sort, ignore_index=ignore_index)
if expected.shape != df.shape:
assert_eq(expected.fillna(-1), actual.fillna(-1), check_dtype=False)
else:
assert_eq(
expected, actual, check_index_type=False if gdf.empty else True
)
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB")),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[10, 20]),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[7, 8]),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[7, 20, 11, 9],
),
pd.DataFrame({"l": [10]}),
pd.DataFrame({"l": [10]}, index=[100]),
pd.DataFrame({"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]}),
pd.DataFrame(
{"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]},
index=[100, 200, 300, 400, 500, 0],
),
pd.DataFrame({"first_col": [], "second_col": [], "third_col": []}),
],
)
@pytest.mark.parametrize(
"other",
[
[[1, 2], [10, 100]],
[[1, 2, 10, 100, 0.1, 0.2, 0.0021]],
[[]],
[[], [], [], []],
[[0.23, 0.00023, -10.00, 100, 200, 1000232, 1232.32323]],
],
)
@pytest.mark.parametrize("sort", [False, True])
@pytest.mark.parametrize("ignore_index", [True, False])
def test_dataframe_append_lists(df, other, sort, ignore_index):
pdf = df
other_pd = other
gdf = cudf.from_pandas(df)
other_gd = [
cudf.from_pandas(o) if isinstance(o, pd.DataFrame) else o
for o in other
]
expected = pdf.append(other_pd, sort=sort, ignore_index=ignore_index)
actual = gdf.append(other_gd, sort=sort, ignore_index=ignore_index)
if expected.shape != df.shape:
assert_eq(expected.fillna(-1), actual.fillna(-1), check_dtype=False)
else:
assert_eq(
expected, actual, check_index_type=False if gdf.empty else True
)
def test_dataframe_append_error():
df = cudf.DataFrame({"a": [1, 2, 3]})
ps = cudf.Series([1, 2, 3])
with pytest.raises(
TypeError,
match="Can only append a Series if ignore_index=True "
"or if the Series has a name",
):
df.append(ps)
def test_cudf_arrow_array_error():
df = cudf.DataFrame({"a": [1, 2, 3]})
with pytest.raises(
TypeError,
match="Implicit conversion to a host PyArrow Table via __arrow_array__"
" is not allowed, To explicitly construct a PyArrow Table, consider "
"using .to_arrow()",
):
df.__arrow_array__()
sr = cudf.Series([1, 2, 3])
with pytest.raises(
TypeError,
match="Implicit conversion to a host PyArrow Array via __arrow_array__"
" is not allowed, To explicitly construct a PyArrow Array, consider "
"using .to_arrow()",
):
sr.__arrow_array__()
sr = cudf.Series(["a", "b", "c"])
with pytest.raises(
TypeError,
match="Implicit conversion to a host PyArrow Array via __arrow_array__"
" is not allowed, To explicitly construct a PyArrow Array, consider "
"using .to_arrow()",
):
sr.__arrow_array__()
@pytest.mark.parametrize("n", [0, 2, 5, 10, None])
@pytest.mark.parametrize("frac", [0.1, 0.5, 1, 2, None])
@pytest.mark.parametrize("replace", [True, False])
@pytest.mark.parametrize("axis", [0, 1])
def test_dataframe_sample_basic(n, frac, replace, axis):
    # skip: sampling columns with replacement can yield duplicate column
    # names, which cudf doesn't currently support
if axis == 1 and replace:
return
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5],
"float": [0.05, 0.2, 0.3, 0.2, 0.25],
"int": [1, 3, 5, 4, 2],
},
index=[1, 2, 3, 4, 5],
)
df = cudf.DataFrame.from_pandas(pdf)
random_state = 0
try:
pout = pdf.sample(
n=n,
frac=frac,
replace=replace,
random_state=random_state,
axis=axis,
)
except BaseException:
assert_exceptions_equal(
lfunc=pdf.sample,
rfunc=df.sample,
lfunc_args_and_kwargs=(
[],
{
"n": n,
"frac": frac,
"replace": replace,
"random_state": random_state,
"axis": axis,
},
),
rfunc_args_and_kwargs=(
[],
{
"n": n,
"frac": frac,
"replace": replace,
"random_state": random_state,
"axis": axis,
},
),
)
else:
gout = df.sample(
n=n,
frac=frac,
replace=replace,
random_state=random_state,
axis=axis,
)
assert pout.shape == gout.shape
@pytest.mark.parametrize("replace", [True, False])
@pytest.mark.parametrize("random_state", [1, np.random.mtrand.RandomState(10)])
def test_dataframe_reproducibility(replace, random_state):
df = cudf.DataFrame({"a": cupy.arange(0, 1024)})
expected = df.sample(1024, replace=replace, random_state=random_state)
out = df.sample(1024, replace=replace, random_state=random_state)
assert_eq(expected, out)
@pytest.mark.parametrize("n", [0, 2, 5, 10, None])
@pytest.mark.parametrize("frac", [0.1, 0.5, 1, 2, None])
@pytest.mark.parametrize("replace", [True, False])
def test_series_sample_basic(n, frac, replace):
psr = pd.Series([1, 2, 3, 4, 5])
sr = cudf.Series.from_pandas(psr)
random_state = 0
try:
pout = psr.sample(
n=n, frac=frac, replace=replace, random_state=random_state
)
except BaseException:
assert_exceptions_equal(
lfunc=psr.sample,
rfunc=sr.sample,
lfunc_args_and_kwargs=(
[],
{
"n": n,
"frac": frac,
"replace": replace,
"random_state": random_state,
},
),
rfunc_args_and_kwargs=(
[],
{
"n": n,
"frac": frac,
"replace": replace,
"random_state": random_state,
},
),
)
else:
gout = sr.sample(
n=n, frac=frac, replace=replace, random_state=random_state
)
assert pout.shape == gout.shape
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame(index=[100, 10, 1, 0]),
pd.DataFrame(columns=["a", "b", "c", "d"]),
pd.DataFrame(columns=["a", "b", "c", "d"], index=[100]),
pd.DataFrame(
columns=["a", "b", "c", "d"], index=[100, 10000, 2131, 133]
),
pd.DataFrame({"a": [1, 2, 3], "b": ["abc", "xyz", "klm"]}),
],
)
def test_dataframe_empty(df):
pdf = df
gdf = cudf.from_pandas(pdf)
assert_eq(pdf.empty, gdf.empty)
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame(index=[100, 10, 1, 0]),
pd.DataFrame(columns=["a", "b", "c", "d"]),
pd.DataFrame(columns=["a", "b", "c", "d"], index=[100]),
pd.DataFrame(
columns=["a", "b", "c", "d"], index=[100, 10000, 2131, 133]
),
pd.DataFrame({"a": [1, 2, 3], "b": ["abc", "xyz", "klm"]}),
],
)
def test_dataframe_size(df):
pdf = df
gdf = cudf.from_pandas(pdf)
assert_eq(pdf.size, gdf.size)
@pytest.mark.parametrize(
"ps",
[
pd.Series(dtype="float64"),
pd.Series(index=[100, 10, 1, 0], dtype="float64"),
pd.Series([], dtype="float64"),
pd.Series(["a", "b", "c", "d"]),
pd.Series(["a", "b", "c", "d"], index=[0, 1, 10, 11]),
],
)
def test_series_empty(ps):
ps = ps
gs = cudf.from_pandas(ps)
assert_eq(ps.empty, gs.empty)
@pytest.mark.parametrize(
"data",
[
[],
[1],
{"a": [10, 11, 12]},
{
"a": [10, 11, 12],
"another column name": [12, 22, 34],
"xyz": [0, 10, 11],
},
],
)
@pytest.mark.parametrize("columns", [["a"], ["another column name"], None])
def test_dataframe_init_with_columns(data, columns):
pdf = pd.DataFrame(data, columns=columns)
gdf = cudf.DataFrame(data, columns=columns)
assert_eq(
pdf,
gdf,
check_index_type=False if len(pdf.index) == 0 else True,
check_dtype=False if pdf.empty and len(pdf.columns) else True,
)
@pytest.mark.parametrize(
"data, ignore_dtype",
[
([pd.Series([1, 2, 3])], False),
([pd.Series(index=[1, 2, 3], dtype="float64")], False),
([pd.Series(name="empty series name", dtype="float64")], False),
(
[pd.Series([1]), pd.Series([], dtype="float64"), pd.Series([3])],
False,
),
(
[
pd.Series([1, 0.324234, 32424.323, -1233, 34242]),
pd.Series([], dtype="float64"),
pd.Series([3], name="series that is named"),
],
False,
),
([pd.Series([1, 2, 3], name="hi")] * 10, False),
([pd.Series([1, 2, 3], name=None, index=[10, 11, 12])] * 10, False),
(
[
pd.Series([1, 2, 3], name=None, index=[10, 11, 12]),
pd.Series([1, 2, 30], name=None, index=[13, 144, 15]),
],
True,
),
(
[
pd.Series([1, 0.324234, 32424.323, -1233, 34242]),
pd.Series([], dtype="float64"),
pd.Series(index=[10, 11, 12], dtype="float64"),
],
False,
),
(
[
pd.Series([1, 0.324234, 32424.323, -1233, 34242]),
pd.Series([], name="abc", dtype="float64"),
| pd.Series(index=[10, 11, 12], dtype="float64") | pandas.Series |
import os
import gc
import math
import time
import random
import cv2
import numpy as np
import pandas as pd
from tqdm import tqdm
from copy import deepcopy
import matplotlib.pyplot as plt
import torch
from torch.utils.data import Dataset
from joblib import Parallel, delayed
from sklearn.model_selection import train_test_split
from src.utils.transform import *
import warnings
warnings.filterwarnings("ignore")
class SynthesizedDatabaseCreator(object):
    """
    Builds a synthetic wafer-map defect database: scratches, donuts, local and
    center clusters, edge rings, edge locs, near-full and random noise maps are
    rendered onto a template wafer map and returned as pandas DataFrames with
    'waferMap' and 'failureType' columns.
    """
def __init__(self, example_number, synthesized_path_name, image_dims):
self.IMAGE_DIMS = image_dims
self.number_points = example_number
self.cpu_count = 4
self.synthesized_path_name = synthesized_path_name
def add_pattern(cur_x, add_x):
if cur_x == 1 and add_x == 2:
return add_x
else:
return cur_x
self.add_pattern = np.vectorize(add_pattern)
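        # add_pattern overlays a defect image onto the current map cell-wise:
        # a background cell (1) becomes a defect cell (2) where the overlay has
        # a defect; all other cells are left unchanged.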
def load_template_map(image_dim):
template_path = 'input/template_wafer_map.pkl'
template = pd.read_pickle(template_path)
template = cv2.resize(template.waferMap.copy(), dsize=(image_dim[0], image_dim[1]),
interpolation=cv2.INTER_NEAREST)
            # 2 - defect pattern
            # 1 - background
            # 0 - empty area outside the wafer
template[template == 2] = 1
return template
self.template_map = load_template_map(self.IMAGE_DIMS)
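    # A hypothetical construction sketch (argument values are assumptions,
    # not taken from this file):
    #
    #     creator = SynthesizedDatabaseCreator(
    #         example_number=2000,
    #         synthesized_path_name='output/synthesized_db.pkl',
    #         image_dims=(96, 96, 1),
    #     )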
def sawtooth_line(self, XC_, YC_, L0_, angle_, pattern_type, line_count=1, lam_poisson=0.2, save=False,
add_patterns=[None]):
size = XC_.shape[0]
synthesized_base = [None] * size
for n in tqdm(range(size)):
step = n
template = deepcopy(self.template_map)
if add_patterns[0]:
for pattern in add_patterns:
for img_pattern in pattern:
template = self.add_pattern(template, img_pattern)
COLOR_SCALE = 2
for repeate in range(line_count):
if repeate:
step = random.randint(0, size - 1)
                # initialize the parameters of this line
L0 = L0_[step]
XC = XC_[step]
YC = YC_[step]
angle = angle_[step]
                # parameters of the line equation
def delta_(x, y):
return int(math.sqrt(x ** 2 + y ** 2))
delta = np.vectorize(delta_)
L = L0 - np.sum(delta(XC, YC)[1:])
N = 200
x0, y0 = 0, 0
                # build the sawtooth line piece by piece
for i in range(XC.shape[0]):
                    # randomly shorten the segment or keep its length (rand is -1 or 0)
rand = random.randint(-1, 0)
scale = 0.4
t = np.linspace(0, L // (line_count + rand * scale), N)
xc = XC[i]
yc = YC[i]
X = np.cos(angle[i]) * t + xc + x0
Y = np.sin(angle[i]) * t + yc + y0
X_ = np.around(X)
Y_ = np.around(Y)
x_prev, y_prev = x0, y0
x_first, y_first = 0, 0
for j in range(X_.shape[0]):
x = int(X_[j])
y = int(Y_[j])
if j == 0:
                            # first point of the segment
x_first, y_first = x, y
try:
if template[x, y] == 1:
template[x, y] = COLOR_SCALE
x0, y0 = x, y
except IndexError:
break
                    # stitch consecutive segments together
if i != 0:
                        # line equation used to connect the segments
k = (y_prev - y_first) / (x_prev - x_first + 1e-06)
b = y_first - k * x_first
X = np.linspace(x_prev, x_first, 20)
Y = k * X + b
X_ = np.around(X)
Y_ = np.around(Y)
for j in range(X_.shape[0]):
x = int(X_[j])
y = int(Y_[j])
try:
if template[x, y] == 1:
template[x, y] = COLOR_SCALE
except IndexError:
break
synthesized_base[n] = [template, pattern_type]
            # save figures for presentation purposes
if save:
path = 'output/test_classes/{}'.format(pattern_type)
try:
os.mkdir(path)
except OSError:
pass
plt.imshow(template, cmap='inferno')
name = '/{}{}.jpg'.format(pattern_type, n)
plt.savefig(path + name)
return pd.DataFrame(synthesized_base, columns=['waferMap', 'failureType'])
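    # sawtooth_line returns one row per synthesized map: 'waferMap' holds the 2D
    # template with the scratch drawn in (value 2), 'failureType' holds the label
    # passed in as pattern_type.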
@staticmethod
def add_noise(template, pattern_type, lam_poisson=0.2, dilate_time=1):
        # dilate the pattern using neighboring pixels
is_dilate = random.randint(-1, 1)
if is_dilate == 1 or pattern_type == 'scratch':
kernel1 = np.ones((3, 3), np.uint8)
kernel2 = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]], dtype=np.uint8)
count_iter = random.randint(1, dilate_time)
template = cv2.dilate(template, kernel2, iterations=count_iter)
template = cv2.morphologyEx(template, cv2.MORPH_CLOSE, kernel2)
        # inject noise
noise_img = template.copy()
        mask = np.random.randint(0, 2, size=noise_img.shape).astype(bool)
mask[noise_img == 0] = False
r = np.random.poisson(lam=lam_poisson, size=noise_img.shape)
        # clip the noise values into the color range [1, 2]
r[r == 0] = 1
r[r > 2] = 2
noise_img[mask] = r[mask]
        # morphological closing to thicken the pattern
# kernel = np.ones((3, 3), np.uint8)
kernel = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]], dtype=np.uint8)
noise_img = cv2.morphologyEx(noise_img, cv2.MORPH_CLOSE, kernel)
# if pattern_type != 'scratch':
# noise_img = cv2.erode(noise_img, kernel, iterations=1)
return noise_img
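    # add_noise is a static method, so it can also be applied to a single map
    # outside the generators -- a sketch with an assumed `wafer_map` array:
    #
    #     noisy = SynthesizedDatabaseCreator.add_noise(
    #         wafer_map, pattern_type='scratch', lam_poisson=0.3
    #     )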
def generator_scratch(self, mode=0, plot=False, line_count=1, add_patterns=[None], is_noised=False):
print('[INFO] Create scratches')
        # number of synthesized maps
N_POINTS = self.number_points // 2
        line_part = 5  # segments per line
        # total length of the scratch
L0 = np.random.randint(0.2 * self.IMAGE_DIMS[0], 0.45 * self.IMAGE_DIMS[0], size=N_POINTS)
        # X coordinate of the line start
xc = [np.random.randint(0.2 * self.IMAGE_DIMS[0], 0.5 * self.IMAGE_DIMS[0], size=N_POINTS)]
for _ in range(line_part - 1):
            # x offset for the start of the next segment
delta_xc = np.random.randint(0.01 * self.IMAGE_DIMS[0], 0.02 * self.IMAGE_DIMS[0] + 2, size=N_POINTS)
np.random.shuffle(delta_xc)
xc.append(delta_xc)
        # merge into the generator's input format
xc = np.array(xc).T
np.random.shuffle(xc)
        # Y coordinate of the line start
yc = [np.random.randint(0.3 * self.IMAGE_DIMS[0], 0.7 * self.IMAGE_DIMS[0], size=N_POINTS)]
for _ in range(line_part - 1):
            # y offset for the start of the next segment
delta_yc = np.random.randint(0.01 * self.IMAGE_DIMS[0], 0.02 * self.IMAGE_DIMS[0] + 2, size=N_POINTS)
np.random.shuffle(delta_yc)
yc.append(delta_yc)
        # merge into the generator's input format
yc = np.array(yc).T
np.random.shuffle(yc)
        # slope angle for each segment
angle = [np.random.randint(-50, 50, size=N_POINTS) * np.pi / 180]
for _ in range(line_part - 1):
part_angle = np.random.randint(30, 40, size=N_POINTS) * np.pi / 180 * np.sign(angle[0])
angle.append(part_angle)
angle = np.array(angle).T
np.random.shuffle(angle)
df_scratch_curved = None
if mode == 0:
            # run the generator in parallel
n_workers = self.cpu_count
results = Parallel(n_workers)(
delayed(self.sawtooth_line)(xc[i::n_workers], yc[i::n_workers],
L0[i::n_workers], angle[i::n_workers],
pattern_type='Scratch',
line_count=line_count,
add_patterns=add_patterns)
for i in range(n_workers))
df_scratch_curved = results[0]
for i in range(1, len(results)):
df_scratch_curved = pd.concat((df_scratch_curved, results[i]), sort=False)
if is_noised:
df_scratch_curved.waferMap = df_scratch_curved.waferMap.map(lambda wafer_map:
self.add_noise(wafer_map,
pattern_type='scratch',
lam_poisson=0.3))
if plot:
fig, ax = plt.subplots(nrows=10, ncols=10, figsize=(15, 10))
ax = ax.ravel(order='C')
sample_idx = np.random.choice(N_POINTS, 100)
for i, idx in enumerate(sample_idx):
ax[i].imshow(df_scratch_curved.waferMap.values[idx], cmap='inferno')
ax[i].axis('off')
fig.suptitle('Synthesized scratches')
plt.show()
else:
gc.collect()
return df_scratch_curved
def create_rings(self, XC, YC, R_, PHI, N, pattern_type, lam_poisson=1.2, save=False, add_patterns=[None]):
color_scale = 2
size = XC.shape[0]
synthesized_base = [None] * size
for n in tqdm(range(size)):
            # working canvas (copy of the template map)
template = deepcopy(self.template_map)
if add_patterns[0]:
for pattern in add_patterns:
for img_pattern in pattern:
template = self.add_pattern(template, img_pattern)
            # ring parameters
phi = np.linspace(PHI[n][0], PHI[n][1], N[n])
r = np.linspace(R_[n][0], R_[n][1], N[n])
xc = XC[n]
yc = YC[n]
            # build the polar mesh and convert it to Cartesian coordinates
R, Fi = np.meshgrid(r, phi)
X = R * (np.cos(Fi)) + xc
Y = R * (np.sin(Fi)) + yc
X_ = np.around(X)
Y_ = np.around(Y)
            # cell indices covered by the ring
points = []
for i in range(X_.shape[0]):
for j in range(X_.shape[1]):
x = X_[i, j]
y = Y_[i, j]
points.append((x, y))
for idx in points:
i, j = idx
i = int(round(i))
j = int(round(j))
try:
if template[i, j] == 1:
template[i, j] = color_scale
except IndexError:
break
synthesized_base[n] = [template, pattern_type]
            # save figures for presentation purposes
if save:
path = 'output/test_classes/{}'.format(pattern_type)
try:
os.mkdir(path)
except OSError:
pass
plt.imshow(template, cmap='inferno')
name = '/{}{}.jpg'.format(pattern_type, n)
plt.savefig(path + name)
return pd.DataFrame(synthesized_base, columns=['waferMap', 'failureType'])
    def generator_donut(self, mode=0, plot=False, add_patterns=[None], is_noised=False):
print('[INFO] Create donuts')
        # number of synthesized maps
N_POINTS = self.number_points
PHI = None
for i in range(4):
            # start angle of the sector
phi1 = np.random.uniform(0 + 95 * i, 30 + 95 * i, size=N_POINTS // 4) * np.pi / 180
            # end angle of the sector
phi2 = np.random.uniform(180 + 90 * i, 360 * (i + 1), size=N_POINTS // 4) * np.pi / 180
phi = np.vstack((phi1, phi2))
            # merge into the generator's input format
phi = np.array([[phi[0, j], phi[1, j]] for j in range(phi.shape[1])])
if i == 0:
PHI = phi
else:
PHI = np.vstack((PHI, phi))
        # inner ring radius
r1 = np.random.randint(0.15 * self.IMAGE_DIMS[0], 0.3 * self.IMAGE_DIMS[0], size=N_POINTS)
        # outer ring radius
r2 = np.random.randint(0.33 * self.IMAGE_DIMS[0], 0.4 * self.IMAGE_DIMS[0], size=N_POINTS)
r = np.vstack((r1, r2))
        # merge into the generator's input format
r = np.array([[r[0, i], r[1, i]] for i in range(r.shape[1])])
        # X coordinate of the ring center
XC = np.random.randint(0.45 * self.IMAGE_DIMS[0], 0.55 * self.IMAGE_DIMS[0], size=N_POINTS)
        # Y coordinate of the ring center
YC = np.random.randint(0.45 * self.IMAGE_DIMS[0], 0.55 * self.IMAGE_DIMS[0], size=N_POINTS)
        # sampling density (number of mesh points)
N = np.random.randint(200, 210, size=N_POINTS)
df_donut = None
if mode == 0:
            # run the generator in parallel
n_workers = self.cpu_count
results = Parallel(n_workers)(
delayed(self.create_rings)(XC[i::n_workers], YC[i::n_workers],
r[i::n_workers], PHI[i::n_workers],
N[i::n_workers], pattern_type='Donut',
add_patterns=add_patterns)
for i in range(n_workers))
df_donut = results[0]
for i in range(1, len(results)):
df_donut = pd.concat((df_donut, results[i]), sort=False)
if is_noised:
df_donut.waferMap = df_donut.waferMap.map(lambda wafer_map:
self.add_noise(wafer_map,
pattern_type='donut',
lam_poisson=0.9,
dilate_time=4))
if plot:
fig, ax = plt.subplots(nrows=10, ncols=10, figsize=(10, 8))
ax = ax.ravel(order='C')
sample_idx = np.random.choice(N_POINTS, 100)
for i, idx in enumerate(sample_idx):
ax[i].imshow(df_donut.waferMap.values[idx], cmap='inferno')
ax[i].axis('off')
            fig.suptitle('Synthesized donuts')
plt.show()
else:
gc.collect()
return df_donut
def generator_loc(self, mode=0, plot=False, add_patterns=[None], is_noised=True):
print('[INFO] Create loc')
        # number of synthesized maps
N_POINTS = self.number_points
PHI = None
for i in range(4):
            # start angle of the sector
phi1 = np.random.uniform(95 * i, 55 + 90 * i, size=N_POINTS // 4) * np.pi / 180
            # end angle of the sector
phi2 = np.random.uniform(65 + 90 * i, 95 * (i + 1), size=N_POINTS // 4) * np.pi / 180
phi = np.vstack((phi1, phi2))
            # merge into the generator's input format
phi = np.array([[phi[0, j], phi[1, j]] for j in range(phi.shape[1])])
if i == 0:
PHI = phi
else:
PHI = np.vstack((PHI, phi))
        # inner ring radius
r1 = np.random.randint(0.1 * self.IMAGE_DIMS[0], 0.2 * self.IMAGE_DIMS[0], size=N_POINTS)
        # outer ring radius
r2 = np.random.randint(0.2 * self.IMAGE_DIMS[0], 0.25 * self.IMAGE_DIMS[0], size=N_POINTS)
r = np.vstack((r1, r2))
        # merge into the generator's input format
r = np.array([[r[0, i], r[1, i]] for i in range(r.shape[1])])
        # X coordinate of the ring center
XC = np.random.randint(0.45 * self.IMAGE_DIMS[0], 0.55 * self.IMAGE_DIMS[0], size=N_POINTS)
        # Y coordinate of the ring center
YC = np.random.randint(0.45 * self.IMAGE_DIMS[0], 0.55 * self.IMAGE_DIMS[0], size=N_POINTS)
        # sampling density (number of mesh points)
N = np.random.randint(200, 210, size=N_POINTS)
df_loc = None
if mode == 1:
            # sequential generator used for demo images (saves figures)
df_loc = self.create_rings(XC, YC, r, PHI, N, pattern_type='Loc', save=True)
elif mode == 0:
            # run the generator in parallel
n_workers = self.cpu_count
results = Parallel(n_workers)(
delayed(self.create_rings)(XC[i::n_workers], YC[i::n_workers],
r[i::n_workers], PHI[i::n_workers],
N[i::n_workers], pattern_type='Loc',
add_patterns=add_patterns)
for i in range(n_workers))
df_loc = results[0]
for i in range(1, len(results)):
df_loc = pd.concat((df_loc, results[i]), sort=False)
if is_noised:
df_loc.waferMap = df_loc.waferMap.map(lambda wafer_map:
self.add_noise(wafer_map,
pattern_type='scratch',
lam_poisson=0.3))
if plot:
fig, ax = plt.subplots(nrows=10, ncols=10, figsize=(10, 8))
ax = ax.ravel(order='C')
sample_idx = np.random.choice(N_POINTS, 100)
for i, idx in enumerate(sample_idx):
ax[i].imshow(df_loc.waferMap.values[idx], cmap='inferno')
ax[i].axis('off')
            fig.suptitle('Synthesized loc defects')
plt.show()
else:
gc.collect()
return df_loc
def generator_center(self, mode=0, plot=False, add_patterns=[None], is_noised=True):
print('[INFO] Create center')
        # number of synthesized maps
N_POINTS = self.number_points
PHI = None
for i in range(4):
            # start angle of the sector
phi1 = np.random.uniform(95 * i, 10 + 90 * i, size=N_POINTS // 4) * np.pi / 180
            # end angle of the sector
phi2 = np.random.uniform(45 + 90 * i, 95 * (i + 1), size=N_POINTS // 4) * np.pi / 180
phi = np.vstack((phi1, phi2))
            # merge into the generator's input format
phi = np.array([[phi[0, j], phi[1, j]] for j in range(phi.shape[1])])
if i == 0:
PHI = phi
else:
PHI = np.vstack((PHI, phi))
        # inner ring radius
r1 = np.random.randint(0.0 * self.IMAGE_DIMS[0], 0.05 * self.IMAGE_DIMS[0], size=N_POINTS)
        # outer ring radius
r2 = np.random.randint(0.12 * self.IMAGE_DIMS[0], 0.23 * self.IMAGE_DIMS[0], size=N_POINTS)
r = np.vstack((r1, r2))
        # merge into the generator's input format
r = np.array([[r[0, i], r[1, i]] for i in range(r.shape[1])])
        # X coordinate of the ring center
XC = np.random.randint(0.48 * self.IMAGE_DIMS[0], 0.5 * self.IMAGE_DIMS[0], size=N_POINTS)
        # Y coordinate of the ring center
YC = np.random.randint(0.48 * self.IMAGE_DIMS[0], 0.5 * self.IMAGE_DIMS[0], size=N_POINTS)
        # sampling density (number of mesh points)
        N = np.random.randint(200, 210, size=N_POINTS)
        df_center = None
if mode == 1:
            # sequential generator used for demo images (saves figures)
df_center = self.create_rings(XC, YC, r, PHI, N, pattern_type='Center', save=True)
elif mode == 0:
            # run the generator in parallel
n_workers = self.cpu_count
results = Parallel(n_workers)(
delayed(self.create_rings)(XC[i::n_workers], YC[i::n_workers],
r[i::n_workers], PHI[i::n_workers],
N[i::n_workers], pattern_type='Center',
add_patterns=add_patterns)
for i in range(n_workers))
df_center = results[0]
for i in range(1, len(results)):
df_center = pd.concat((df_center, results[i]), sort=False)
if is_noised:
df_center.waferMap = df_center.waferMap.map(lambda wafer_map:
self.add_noise(wafer_map,
pattern_type='scratch',
lam_poisson=0.3))
if plot:
fig, ax = plt.subplots(nrows=10, ncols=10, figsize=(10, 8))
ax = ax.ravel(order='C')
sample_idx = np.random.choice(N_POINTS, 100)
for i, idx in enumerate(sample_idx):
ax[i].imshow(df_center.waferMap.values[idx], cmap='inferno')
ax[i].axis('off')
            fig.suptitle('Synthesized center defects')
plt.show()
else:
gc.collect()
return df_center
def generator_edge_ring(self, mode=0, plot=False, add_patterns=[None], is_noised=True):
print('[INFO] Create edge_ring')
        # number of synthesized maps
N_POINTS = self.number_points
PHI = None
for i in range(4):
            # start angle of the sector
phi1 = np.random.uniform(0 + 90 * i, 30 + 90 * i, size=N_POINTS // 4) * np.pi / 180
            # end angle of the sector
phi2 = np.random.uniform(320 + 90 * i,
360 * (i + 1), size=N_POINTS // 4) * np.pi / 180
phi = np.vstack((phi1, phi2))
            # merge into the generator's input format
phi = np.array([[phi[0, j], phi[1, j]] for j in range(phi.shape[1])])
if i == 0:
PHI = phi
else:
PHI = np.vstack((PHI, phi))
center = 0.5 * self.IMAGE_DIMS[0]
r1 = np.random.randint(center - 4, center - 3, size=N_POINTS)
r2 = np.random.randint(center, center + 1, size=N_POINTS)
r = np.vstack((r1, r2))
r = np.array([[r[0, i], r[1, i]] for i in range(r.shape[1])])
        # X coordinate of the ring center
XC = np.random.randint(center - 2, center, size=N_POINTS)
        # Y coordinate of the ring center
YC = np.random.randint(center - 2, center, size=N_POINTS)
        # sampling density (number of mesh points)
N = np.random.randint(200, 210, size=N_POINTS)
df_edge_ring = None
if mode == 1:
            # sequential generator used for demo images (saves figures)
df_edge_ring = self.create_rings(XC, YC, r, PHI, N, pattern_type='Edge-Ring', save=True)
elif mode == 0:
            # run the generator in parallel
n_workers = self.cpu_count
results = Parallel(n_workers)(
delayed(self.create_rings)(XC[i::n_workers], YC[i::n_workers],
r[i::n_workers], PHI[i::n_workers],
N[i::n_workers], pattern_type='Edge-Ring',
add_patterns=add_patterns)
for i in range(n_workers))
df_edge_ring = results[0]
for i in range(1, len(results)):
df_edge_ring = pd.concat((df_edge_ring, results[i]), sort=False)
if is_noised:
df_edge_ring.waferMap = df_edge_ring.waferMap.map(lambda wafer_map:
self.add_noise(wafer_map,
pattern_type='scratch',
lam_poisson=0.3))
if plot:
fig, ax = plt.subplots(nrows=10, ncols=10, figsize=(10, 8))
ax = ax.ravel(order='C')
sample_idx = np.random.choice(N_POINTS, 100)
for i, idx in enumerate(sample_idx):
ax[i].imshow(df_edge_ring.waferMap.values[idx], cmap='inferno')
ax[i].axis('off')
            fig.suptitle('Synthesized edge-rings')
plt.show()
else:
gc.collect()
return df_edge_ring
def generator_edge_loc(self, mode=0, plot=False, add_patterns=[None], is_noised=True):
print('[INFO] Create edge_loc')
        # number of synthesized maps
N_POINTS = self.number_points
PHI = None
for i in range(4):
            # start angle of the sector
phi1 = np.random.uniform(15 + 90 * i, 25 + 90 * i, size=N_POINTS // 4) * np.pi / 180
            # end angle of the sector
phi2 = np.random.uniform(55 + 90 * i, 115 + 90 * i, size=N_POINTS // 4) * np.pi / 180
phi = np.vstack((phi1, phi2))
            # merge into the generator's input format
phi = np.array([[phi[0, j], phi[1, j]] for j in range(phi.shape[1])])
if i == 0:
PHI = phi
else:
PHI = np.vstack((PHI, phi))
center = 0.5 * self.IMAGE_DIMS[0]
r1 = np.random.randint(center - 5, center - 3, size=N_POINTS)
r2 = np.random.randint(center, center + 1, size=N_POINTS)
r = np.vstack((r1, r2))
r = np.array([[r[0, i], r[1, i]] for i in range(r.shape[1])])
        # X coordinate of the ring center
XC = np.random.randint(center - 2, center - 1, size=N_POINTS)
        # Y coordinate of the ring center
YC = np.random.randint(center - 2, center - 1, size=N_POINTS)
        # sampling density (number of mesh points)
N = np.random.randint(200, 210, size=N_POINTS)
df_edge_loc = None
if mode == 1:
            # sequential generator used for demo images (saves figures)
df_edge_loc = self.create_rings(XC, YC, r, PHI, N, pattern_type='Edge-Loc', save=True)
elif mode == 0:
            # run the generator in parallel
n_workers = self.cpu_count
results = Parallel(n_workers)(
delayed(self.create_rings)(XC[i::n_workers], YC[i::n_workers],
r[i::n_workers], PHI[i::n_workers],
N[i::n_workers], pattern_type='Edge-Loc',
add_patterns=add_patterns)
for i in range(n_workers))
df_edge_loc = results[0]
for i in range(1, len(results)):
df_edge_loc = pd.concat((df_edge_loc, results[i]), sort=False)
if is_noised:
df_edge_loc.waferMap = df_edge_loc.waferMap.map(lambda wafer_map:
self.add_noise(wafer_map,
pattern_type='scratch',
lam_poisson=0.3))
if plot:
fig, ax = plt.subplots(nrows=10, ncols=10, figsize=(10, 8))
ax = ax.ravel(order='C')
sample_idx = np.random.choice(N_POINTS, 100)
for i, idx in enumerate(sample_idx):
ax[i].imshow(df_edge_loc.waferMap.values[idx], cmap='inferno')
ax[i].axis('off')
            fig.suptitle('Synthesized edge-locs')
plt.show()
else:
gc.collect()
return df_edge_loc
def create_near_full(self, capacity, pattern_type, lam_poisson=1.2, save=False):
synthesized_base = [None] * capacity
for step in range(capacity):
            # working canvas (copy of the template map)
template = deepcopy(self.template_map)
            # inject noise
noise_img = deepcopy(template)
            mask = np.random.randint(0, 2, size=noise_img.shape).astype(bool)
mask[noise_img == 0] = False
r = np.random.poisson(lam=lam_poisson, size=noise_img.shape)
            # remap the noise values so that most cells become defects
# r = np.around(r//np.max(r))
r[r == 0] = 1
r[r == 1] = 2
r[r > 2] = 1
noise_img[mask] = r[mask]
            # thicken the noise with a morphological closing (cross-shaped kernel)
            kernel = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]], dtype=np.uint8)
noise_img = cv2.morphologyEx(noise_img, cv2.MORPH_CLOSE, kernel)
noise_img = cv2.erode(noise_img, kernel, iterations=1)
synthesized_base[step] = [noise_img, pattern_type]
            # save figures for presentation purposes
if save:
path = 'output/test_classes/{}'.format(pattern_type)
try:
os.mkdir(path)
except OSError:
pass
plt.imshow(noise_img, cmap='inferno')
name = '/{}{}.jpg'.format(pattern_type, step)
plt.savefig(path + name)
return pd.DataFrame(synthesized_base, columns=['waferMap', 'failureType'])
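    # Note: unlike the ring and scratch generators, near-full maps are produced by
    # remapping Poisson noise so that most background cells become defects, which
    # is why no geometric parameters are needed here.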
def generator_near_full(self, plot=False):
print('[INFO] Create near_full')
        # number of synthesized maps
N_POINTS = self.number_points
df_near_full = self.create_near_full(N_POINTS, pattern_type='Near-full', lam_poisson=1.3)
if plot:
fig, ax = plt.subplots(nrows=10, ncols=10, figsize=(10, 8))
ax = ax.ravel(order='C')
sample_idx = np.random.choice(N_POINTS, 100)
for i, idx in enumerate(sample_idx):
ax[i].imshow(df_near_full.waferMap.values[idx], cmap='inferno')
ax[i].axis('off')
            fig.suptitle('Synthesized near-full maps')
plt.show()
else:
gc.collect()
return df_near_full
def create_random(self, capacity, pattern_type, lam_poisson=1.2, save=False):
synthesized_base = [None] * capacity
for step in tqdm(range(capacity)):
            # working canvas (copy of the template map)
template = deepcopy(self.template_map)
            # inject noise
noise_img = deepcopy(template)
            mask = np.random.randint(0, 2, size=noise_img.shape).astype(bool)
mask[noise_img == 0] = False
r = np.random.poisson(lam=lam_poisson, size=noise_img.shape)
            # clip the noise values into the color range [1, 2]
# r = np.around(r//np.max(r))
r[r == 0] = 1
r[r > 2] = 2
noise_img[mask] = r[mask]
            # thicken the noise with a morphological closing (cross-shaped kernel)
            kernel = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]], dtype=np.uint8)
noise_img = cv2.morphologyEx(noise_img, cv2.MORPH_CLOSE, kernel)
noise_img = cv2.erode(noise_img, kernel, iterations=1)
synthesized_base[step] = [noise_img, pattern_type]
            # save figures for presentation purposes
if save:
path = 'output/test_classes/{}'.format(pattern_type)
try:
os.mkdir(path)
except OSError:
pass
plt.imshow(noise_img, cmap='inferno')
name = '/{}{}.jpg'.format(pattern_type, step)
plt.savefig(path + name)
return | pd.DataFrame(synthesized_base, columns=['waferMap', 'failureType']) | pandas.DataFrame |
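    # A hypothetical end-to-end sketch of assembling a synthetic database from the
    # generators above (method names are real, the combination itself is assumed):
    #
    #     frames = [
    #         creator.generator_scratch(),
    #         creator.generator_donut(),
    #         creator.generator_loc(),
    #         creator.generator_center(),
    #         creator.generator_edge_ring(),
    #         creator.generator_edge_loc(),
    #         creator.generator_near_full(),
    #     ]
    #     synthesized_db = pd.concat(frames, sort=False, ignore_index=True)
    #     synthesized_db.to_pickle(creator.synthesized_path_name)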